Dataset schema (column, type, observed value range):

| Column | Type | Range / values |
| --- | --- | --- |
| repo_name | string (categorical) | 6 distinct values |
| pr_number | int64 | 512 to 78.9k |
| pr_title | string | 3 to 144 chars |
| pr_description | string | 0 to 30.3k chars |
| author | string | 2 to 21 chars |
| date_created | timestamp[ns, tz=UTC] | |
| date_merged | timestamp[ns, tz=UTC] | |
| previous_commit | string | 40 chars (git commit SHA) |
| pr_commit | string | 40 chars (git commit SHA) |
| query | string | 17 to 30.4k chars |
| filepath | string | 9 to 210 chars |
| before_content | string | 0 to 112M chars |
| after_content | string | 0 to 112M chars |
| label | int64 | -1 to 1 |
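For readers materializing rows of this dataset in .NET, the following is a minimal sketch of a record type mirroring the schema above. The record name and the mapping of column types to .NET types are assumptions made for illustration; they are not part of the dataset.

```c#
// Illustrative only: a hypothetical record mirroring one dataset row.
// The column-to-type mapping is an assumption based on the schema table above.
using System;

public sealed record PrFileRow(
    string RepoName,            // one of 6 repository names
    long PrNumber,
    string PrTitle,
    string PrDescription,
    string Author,
    DateTimeOffset DateCreated, // timestamp[ns, tz=UTC]
    DateTimeOffset DateMerged,  // timestamp[ns, tz=UTC]
    string PreviousCommit,      // 40-char git SHA before the PR
    string PrCommit,            // 40-char git SHA of the PR commit
    string Query,
    string Filepath,
    string BeforeContent,       // file contents at PreviousCommit
    string AfterContent,        // file contents at PrCommit
    long Label);                // -1 or 1
```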
dotnet/runtime
66,479
Add metrics for caching
Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
maryamariyan
2022-03-10T21:30:00Z
2022-04-08T18:16:17Z
51afa067be9f0d00dac2c455ceb78e38eef1e239
cef4ae17d91c278880f45f017e4272afe1f379b3
Add metrics for caching. Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
./src/libraries/System.Private.CoreLib/src/System/Reflection/ReflectionContext.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.Reflection { public abstract class ReflectionContext { protected ReflectionContext() { } public abstract Assembly MapAssembly(Assembly assembly); public abstract TypeInfo MapType(TypeInfo type); public virtual TypeInfo GetTypeForObject(object value!!) { return MapType(value.GetType().GetTypeInfo()); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.Reflection { public abstract class ReflectionContext { protected ReflectionContext() { } public abstract Assembly MapAssembly(Assembly assembly); public abstract TypeInfo MapType(TypeInfo type); public virtual TypeInfo GetTypeForObject(object value!!) { return MapType(value.GetType().GetTypeInfo()); } } }
-1
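To make the API surface described in the PR description above concrete, here is a minimal usage sketch, assuming the Microsoft.Extensions.Caching.Memory package with the `TrackStatistics` option and `GetCurrentStatistics()` method this PR adds. It is an illustration, not code taken from the PR.

```c#
// Minimal sketch (not from the PR): enable statistics tracking and read a snapshot.
using System;
using Microsoft.Extensions.Caching.Memory;

var cache = new MemoryCache(new MemoryCacheOptions
{
    TrackStatistics = true // new option introduced by this PR
});

cache.Set("key", "value");            // adds an entry
cache.TryGetValue("key", out _);      // lookup that should count as a hit
cache.TryGetValue("missing", out _);  // lookup that should count as a miss

MemoryCacheStatistics? stats = cache.GetCurrentStatistics();
if (stats is not null)
{
    Console.WriteLine($"Hits={stats.TotalHits} Misses={stats.TotalMisses}");
    Console.WriteLine($"Entries={stats.CurrentEntryCount} EstimatedSize={stats.CurrentEstimatedSize}");
}
```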
dotnet/runtime
66,479
Add metrics for caching
Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
maryamariyan
2022-03-10T21:30:00Z
2022-04-08T18:16:17Z
51afa067be9f0d00dac2c455ceb78e38eef1e239
cef4ae17d91c278880f45f017e4272afe1f379b3
Add metrics for caching. Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
./src/libraries/System.Runtime.InteropServices/src/System/Runtime/CompilerServices/IDispatchConstantAttribute.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Runtime.InteropServices; using System.Runtime.Versioning; namespace System.Runtime.CompilerServices { [SupportedOSPlatform("windows")] [AttributeUsage(AttributeTargets.Field | AttributeTargets.Parameter, Inherited = false)] public sealed partial class IDispatchConstantAttribute : CustomConstantAttribute { public IDispatchConstantAttribute() { } public override object Value => new DispatchWrapper(null); } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Runtime.InteropServices; using System.Runtime.Versioning; namespace System.Runtime.CompilerServices { [SupportedOSPlatform("windows")] [AttributeUsage(AttributeTargets.Field | AttributeTargets.Parameter, Inherited = false)] public sealed partial class IDispatchConstantAttribute : CustomConstantAttribute { public IDispatchConstantAttribute() { } public override object Value => new DispatchWrapper(null); } }
-1
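The second alternative mentioned in the PR description above (a thread-static `ThreadStats` object whose count/size are aggregated across threads) could look roughly like the sketch below. The type and member names are hypothetical, and the stale-thread cleanup the description warns about is deliberately left out; this illustrates the idea only and is not code from the PR or its linked commit.

```c#
// Hypothetical illustration of the thread-static alternative (not from the PR):
// each thread accumulates its own count/size deltas; a snapshot sums them all.
// Removing trackers for threads that have exited is omitted here; that extra
// bookkeeping is exactly the drawback the description calls out.
using System;
using System.Collections.Generic;

internal sealed class ThreadStats
{
    public long Count;
    public long Size;
}

internal static class CacheStatsAggregator
{
    [ThreadStatic] private static ThreadStats? t_stats;
    private static readonly object s_lock = new();
    private static readonly List<ThreadStats> s_allStats = new();

    // Called on the hot path whenever the current thread adds or removes entries.
    public static void Record(long countDelta, long sizeDelta)
    {
        ThreadStats? stats = t_stats;
        if (stats is null)
        {
            stats = t_stats = new ThreadStats();
            lock (s_lock) { s_allStats.Add(stats); }
        }
        stats.Count += countDelta;
        stats.Size += sizeDelta;
    }

    // Aggregates a point-in-time estimate across all threads seen so far.
    public static (long Count, long Size) Snapshot()
    {
        long count = 0, size = 0;
        lock (s_lock)
        {
            foreach (ThreadStats stats in s_allStats)
            {
                count += stats.Count;
                size += stats.Size;
            }
        }
        return (count, size);
    }
}
```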
dotnet/runtime
66,479
Add metrics for caching
Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
maryamariyan
2022-03-10T21:30:00Z
2022-04-08T18:16:17Z
51afa067be9f0d00dac2c455ceb78e38eef1e239
cef4ae17d91c278880f45f017e4272afe1f379b3
Add metrics for caching. Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
./src/libraries/System.Net.Http/src/System/Net/Http/Headers/AuthenticationHeaderValue.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Diagnostics; using System.Diagnostics.CodeAnalysis; namespace System.Net.Http.Headers { public class AuthenticationHeaderValue : ICloneable { private readonly string _scheme; private readonly string? _parameter; public string Scheme { get { return _scheme; } } // We simplify parameters by just considering them one string. The caller is responsible for correctly parsing // the string. // The reason is that we can't determine the format of parameters. According to Errata 1959 in RFC 2617 // parameters can be "token", "quoted-string", or "#auth-param" where "auth-param" is defined as // "token "=" ( token | quoted-string )". E.g. take the following BASIC example: // Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ== // Due to Base64 encoding we have two final "=". The value is neither a token nor a quoted-string, so it must // be an auth-param according to the RFC definition. But that's also incorrect: auth-param means that we // consider the value before the first "=" as "name" and the final "=" as "value". public string? Parameter { get { return _parameter; } } public AuthenticationHeaderValue(string scheme) : this(scheme, null) { } public AuthenticationHeaderValue(string scheme, string? parameter) { HeaderUtilities.CheckValidToken(scheme, nameof(scheme)); HttpHeaders.CheckContainsNewLine(parameter); _scheme = scheme; _parameter = parameter; } private AuthenticationHeaderValue(AuthenticationHeaderValue source) { Debug.Assert(source != null); _scheme = source._scheme; _parameter = source._parameter; } public override string ToString() { if (string.IsNullOrEmpty(_parameter)) { return _scheme; } return _scheme + " " + _parameter; } public override bool Equals([NotNullWhen(true)] object? obj) { AuthenticationHeaderValue? other = obj as AuthenticationHeaderValue; if (other == null) { return false; } if (string.IsNullOrEmpty(_parameter) && string.IsNullOrEmpty(other._parameter)) { return (string.Equals(_scheme, other._scheme, StringComparison.OrdinalIgnoreCase)); } else { // Since we can't parse the parameter, we use case-sensitive comparison. return string.Equals(_scheme, other._scheme, StringComparison.OrdinalIgnoreCase) && string.Equals(_parameter, other._parameter, StringComparison.Ordinal); } } public override int GetHashCode() { int result = StringComparer.OrdinalIgnoreCase.GetHashCode(_scheme); if (!string.IsNullOrEmpty(_parameter)) { result = result ^ _parameter.GetHashCode(); } return result; } public static AuthenticationHeaderValue Parse(string? input) { int index = 0; return (AuthenticationHeaderValue)GenericHeaderParser.SingleValueAuthenticationParser.ParseValue( input, null, ref index); } public static bool TryParse([NotNullWhen(true)] string? input, [NotNullWhen(true)] out AuthenticationHeaderValue? parsedValue) { int index = 0; parsedValue = null; if (GenericHeaderParser.SingleValueAuthenticationParser.TryParseValue(input, null, ref index, out object? output)) { parsedValue = (AuthenticationHeaderValue)output!; return true; } return false; } internal static int GetAuthenticationLength(string? input, int startIndex, out object? parsedValue) { Debug.Assert(startIndex >= 0); parsedValue = null; if (string.IsNullOrEmpty(input) || (startIndex >= input.Length)) { return 0; } // Parse the scheme string: <scheme> in '<scheme> <parameter>' int schemeLength = HttpRuleParser.GetTokenLength(input, startIndex); if (schemeLength == 0) { return 0; } string? 
targetScheme = null; switch (schemeLength) { // Avoid allocating a scheme string for the most common cases. case 5: targetScheme = "Basic"; break; case 6: targetScheme = "Digest"; break; case 4: targetScheme = "NTLM"; break; case 9: targetScheme = "Negotiate"; break; } string scheme = targetScheme != null && string.CompareOrdinal(input, startIndex, targetScheme, 0, schemeLength) == 0 ? targetScheme : input.Substring(startIndex, schemeLength); int current = startIndex + schemeLength; int whitespaceLength = HttpRuleParser.GetWhitespaceLength(input, current); current = current + whitespaceLength; if ((current == input.Length) || (input[current] == ',')) { // If we only have a scheme followed by whitespace, we're done. parsedValue = new AuthenticationHeaderValue(scheme); return current - startIndex; } // We need at least one space between the scheme and parameters. If there is no whitespace, then we must // have reached the end of the string (i.e. scheme-only string). if (whitespaceLength == 0) { return 0; } // If we get here, we have a <scheme> followed by a whitespace. Now we expect the following: // '<scheme> <blob>[,<name>=<value>]*[, <otherscheme>...]*': <blob> potentially contains one // or more '=' characters, optionally followed by additional name/value pairs, optionally followed by // other schemes. <blob> may be a quoted string. // We look at the value after ',': if it is <token>=<value> then we have a parameter for <scheme>. // If we have either a <token>-only or <token><whitespace><blob> then we have another scheme. int parameterStartIndex = current; int parameterEndIndex = current; if (!TrySkipFirstBlob(input, ref current, ref parameterEndIndex)) { return 0; } if (current < input.Length) { if (!TryGetParametersEndIndex(input, ref current, ref parameterEndIndex)) { return 0; } } string parameter = input.Substring(parameterStartIndex, parameterEndIndex - parameterStartIndex + 1); parsedValue = new AuthenticationHeaderValue(scheme, parameter); return current - startIndex; } private static bool TrySkipFirstBlob(string input, ref int current, ref int parameterEndIndex) { // Find the delimiter: Note that <blob> in "<scheme> <blob>" may be a token, quoted string, name/value // pair or a Base64 encoded string. So make sure that we don't consider ',' characters within a quoted // string as delimiter. while ((current < input.Length) && (input[current] != ',')) { if (input[current] == '"') { int quotedStringLength; if (HttpRuleParser.GetQuotedStringLength(input, current, out quotedStringLength) != HttpParseResult.Parsed) { // We have a quote but an invalid quoted-string. return false; } current = current + quotedStringLength; parameterEndIndex = current - 1; // -1 because 'current' points to the char after the final '"' } else { int whitespaceLength = HttpRuleParser.GetWhitespaceLength(input, current); // We don't want trailing whitespace to be considered part of the parameter blob. Increment // 'parameterEndIndex' only if we don't have a whitespace. E.g. "Basic AbC= , NTLM" should return // "AbC=" as parameter ignoring the spaces before ','. 
if (whitespaceLength == 0) { parameterEndIndex = current; current++; } else { current = current + whitespaceLength; } } } return true; } private static bool TryGetParametersEndIndex(string input, ref int parseEndIndex, ref int parameterEndIndex) { Debug.Assert(parseEndIndex < input.Length, "Expected string to have at least 1 char"); Debug.Assert(input[parseEndIndex] == ','); int current = parseEndIndex; do { current++; // skip ',' delimiter current = HeaderUtilities.GetNextNonEmptyOrWhitespaceIndex(input, current, true, out _); if (current == input.Length) { return true; } // Now we have to determine if after ',' we have a list of <name>=<value> pairs that are part of // the auth scheme parameters OR if we have another auth scheme. Either way, after ',' we expect a // valid token that is either the <name> in a <name>=<value> pair OR <scheme> of another scheme. int tokenLength = HttpRuleParser.GetTokenLength(input, current); if (tokenLength == 0) { return false; } current = current + tokenLength; current = current + HttpRuleParser.GetWhitespaceLength(input, current); // If we reached the end of the string or the token is followed by anything but '=', then the parsed // token is another scheme name. The string representing parameters ends before the token (e.g. // "Digest a=b, c=d, NTLM": return scheme "Digest" with parameters string "a=b, c=d"). if ((current == input.Length) || (input[current] != '=')) { return true; } current++; // skip '=' delimiter current = current + HttpRuleParser.GetWhitespaceLength(input, current); int valueLength = NameValueHeaderValue.GetValueLength(input, current); // After '<name>=' we expect a valid <value> (either token or quoted string) if (valueLength == 0) { return false; } // Update parameter end index, since we just parsed a valid <name>=<value> pair that is part of the // parameters string. current = current + valueLength; parameterEndIndex = current - 1; // -1 because 'current' already points to the char after <value> current = current + HttpRuleParser.GetWhitespaceLength(input, current); parseEndIndex = current; // this essentially points to parameterEndIndex + whitespace + next char } while ((current < input.Length) && (input[current] == ',')); return true; } object ICloneable.Clone() { return new AuthenticationHeaderValue(this); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Diagnostics; using System.Diagnostics.CodeAnalysis; namespace System.Net.Http.Headers { public class AuthenticationHeaderValue : ICloneable { private readonly string _scheme; private readonly string? _parameter; public string Scheme { get { return _scheme; } } // We simplify parameters by just considering them one string. The caller is responsible for correctly parsing // the string. // The reason is that we can't determine the format of parameters. According to Errata 1959 in RFC 2617 // parameters can be "token", "quoted-string", or "#auth-param" where "auth-param" is defined as // "token "=" ( token | quoted-string )". E.g. take the following BASIC example: // Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ== // Due to Base64 encoding we have two final "=". The value is neither a token nor a quoted-string, so it must // be an auth-param according to the RFC definition. But that's also incorrect: auth-param means that we // consider the value before the first "=" as "name" and the final "=" as "value". public string? Parameter { get { return _parameter; } } public AuthenticationHeaderValue(string scheme) : this(scheme, null) { } public AuthenticationHeaderValue(string scheme, string? parameter) { HeaderUtilities.CheckValidToken(scheme, nameof(scheme)); HttpHeaders.CheckContainsNewLine(parameter); _scheme = scheme; _parameter = parameter; } private AuthenticationHeaderValue(AuthenticationHeaderValue source) { Debug.Assert(source != null); _scheme = source._scheme; _parameter = source._parameter; } public override string ToString() { if (string.IsNullOrEmpty(_parameter)) { return _scheme; } return _scheme + " " + _parameter; } public override bool Equals([NotNullWhen(true)] object? obj) { AuthenticationHeaderValue? other = obj as AuthenticationHeaderValue; if (other == null) { return false; } if (string.IsNullOrEmpty(_parameter) && string.IsNullOrEmpty(other._parameter)) { return (string.Equals(_scheme, other._scheme, StringComparison.OrdinalIgnoreCase)); } else { // Since we can't parse the parameter, we use case-sensitive comparison. return string.Equals(_scheme, other._scheme, StringComparison.OrdinalIgnoreCase) && string.Equals(_parameter, other._parameter, StringComparison.Ordinal); } } public override int GetHashCode() { int result = StringComparer.OrdinalIgnoreCase.GetHashCode(_scheme); if (!string.IsNullOrEmpty(_parameter)) { result = result ^ _parameter.GetHashCode(); } return result; } public static AuthenticationHeaderValue Parse(string? input) { int index = 0; return (AuthenticationHeaderValue)GenericHeaderParser.SingleValueAuthenticationParser.ParseValue( input, null, ref index); } public static bool TryParse([NotNullWhen(true)] string? input, [NotNullWhen(true)] out AuthenticationHeaderValue? parsedValue) { int index = 0; parsedValue = null; if (GenericHeaderParser.SingleValueAuthenticationParser.TryParseValue(input, null, ref index, out object? output)) { parsedValue = (AuthenticationHeaderValue)output!; return true; } return false; } internal static int GetAuthenticationLength(string? input, int startIndex, out object? parsedValue) { Debug.Assert(startIndex >= 0); parsedValue = null; if (string.IsNullOrEmpty(input) || (startIndex >= input.Length)) { return 0; } // Parse the scheme string: <scheme> in '<scheme> <parameter>' int schemeLength = HttpRuleParser.GetTokenLength(input, startIndex); if (schemeLength == 0) { return 0; } string? 
targetScheme = null; switch (schemeLength) { // Avoid allocating a scheme string for the most common cases. case 5: targetScheme = "Basic"; break; case 6: targetScheme = "Digest"; break; case 4: targetScheme = "NTLM"; break; case 9: targetScheme = "Negotiate"; break; } string scheme = targetScheme != null && string.CompareOrdinal(input, startIndex, targetScheme, 0, schemeLength) == 0 ? targetScheme : input.Substring(startIndex, schemeLength); int current = startIndex + schemeLength; int whitespaceLength = HttpRuleParser.GetWhitespaceLength(input, current); current = current + whitespaceLength; if ((current == input.Length) || (input[current] == ',')) { // If we only have a scheme followed by whitespace, we're done. parsedValue = new AuthenticationHeaderValue(scheme); return current - startIndex; } // We need at least one space between the scheme and parameters. If there is no whitespace, then we must // have reached the end of the string (i.e. scheme-only string). if (whitespaceLength == 0) { return 0; } // If we get here, we have a <scheme> followed by a whitespace. Now we expect the following: // '<scheme> <blob>[,<name>=<value>]*[, <otherscheme>...]*': <blob> potentially contains one // or more '=' characters, optionally followed by additional name/value pairs, optionally followed by // other schemes. <blob> may be a quoted string. // We look at the value after ',': if it is <token>=<value> then we have a parameter for <scheme>. // If we have either a <token>-only or <token><whitespace><blob> then we have another scheme. int parameterStartIndex = current; int parameterEndIndex = current; if (!TrySkipFirstBlob(input, ref current, ref parameterEndIndex)) { return 0; } if (current < input.Length) { if (!TryGetParametersEndIndex(input, ref current, ref parameterEndIndex)) { return 0; } } string parameter = input.Substring(parameterStartIndex, parameterEndIndex - parameterStartIndex + 1); parsedValue = new AuthenticationHeaderValue(scheme, parameter); return current - startIndex; } private static bool TrySkipFirstBlob(string input, ref int current, ref int parameterEndIndex) { // Find the delimiter: Note that <blob> in "<scheme> <blob>" may be a token, quoted string, name/value // pair or a Base64 encoded string. So make sure that we don't consider ',' characters within a quoted // string as delimiter. while ((current < input.Length) && (input[current] != ',')) { if (input[current] == '"') { int quotedStringLength; if (HttpRuleParser.GetQuotedStringLength(input, current, out quotedStringLength) != HttpParseResult.Parsed) { // We have a quote but an invalid quoted-string. return false; } current = current + quotedStringLength; parameterEndIndex = current - 1; // -1 because 'current' points to the char after the final '"' } else { int whitespaceLength = HttpRuleParser.GetWhitespaceLength(input, current); // We don't want trailing whitespace to be considered part of the parameter blob. Increment // 'parameterEndIndex' only if we don't have a whitespace. E.g. "Basic AbC= , NTLM" should return // "AbC=" as parameter ignoring the spaces before ','. 
if (whitespaceLength == 0) { parameterEndIndex = current; current++; } else { current = current + whitespaceLength; } } } return true; } private static bool TryGetParametersEndIndex(string input, ref int parseEndIndex, ref int parameterEndIndex) { Debug.Assert(parseEndIndex < input.Length, "Expected string to have at least 1 char"); Debug.Assert(input[parseEndIndex] == ','); int current = parseEndIndex; do { current++; // skip ',' delimiter current = HeaderUtilities.GetNextNonEmptyOrWhitespaceIndex(input, current, true, out _); if (current == input.Length) { return true; } // Now we have to determine if after ',' we have a list of <name>=<value> pairs that are part of // the auth scheme parameters OR if we have another auth scheme. Either way, after ',' we expect a // valid token that is either the <name> in a <name>=<value> pair OR <scheme> of another scheme. int tokenLength = HttpRuleParser.GetTokenLength(input, current); if (tokenLength == 0) { return false; } current = current + tokenLength; current = current + HttpRuleParser.GetWhitespaceLength(input, current); // If we reached the end of the string or the token is followed by anything but '=', then the parsed // token is another scheme name. The string representing parameters ends before the token (e.g. // "Digest a=b, c=d, NTLM": return scheme "Digest" with parameters string "a=b, c=d"). if ((current == input.Length) || (input[current] != '=')) { return true; } current++; // skip '=' delimiter current = current + HttpRuleParser.GetWhitespaceLength(input, current); int valueLength = NameValueHeaderValue.GetValueLength(input, current); // After '<name>=' we expect a valid <value> (either token or quoted string) if (valueLength == 0) { return false; } // Update parameter end index, since we just parsed a valid <name>=<value> pair that is part of the // parameters string. current = current + valueLength; parameterEndIndex = current - 1; // -1 because 'current' already points to the char after <value> current = current + HttpRuleParser.GetWhitespaceLength(input, current); parseEndIndex = current; // this essentially points to parameterEndIndex + whitespace + next char } while ((current < input.Length) && (input[current] == ',')); return true; } object ICloneable.Clone() { return new AuthenticationHeaderValue(this); } } }
-1
dotnet/runtime
66,479
Add metrics for caching
Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
maryamariyan
2022-03-10T21:30:00Z
2022-04-08T18:16:17Z
51afa067be9f0d00dac2c455ceb78e38eef1e239
cef4ae17d91c278880f45f017e4272afe1f379b3
Add metrics for caching. Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
./src/libraries/System.Net.Http.WinHttpHandler/src/System/Net/Http/NetEventSource.WinHttpHandler.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Diagnostics.Tracing; namespace System.Net { [EventSource(Name = "Private.InternalDiagnostics.System.Net.Http.WinHttpHandler", LocalizationResources = "FxResources.System.Net.Http.WinHttpHandler.SR")] internal sealed partial class NetEventSource : EventSource { } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Diagnostics.Tracing; namespace System.Net { [EventSource(Name = "Private.InternalDiagnostics.System.Net.Http.WinHttpHandler", LocalizationResources = "FxResources.System.Net.Http.WinHttpHandler.SR")] internal sealed partial class NetEventSource : EventSource { } }
-1
dotnet/runtime
66,479
Add metrics for caching
Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
maryamariyan
2022-03-10T21:30:00Z
2022-04-08T18:16:17Z
51afa067be9f0d00dac2c455ceb78e38eef1e239
cef4ae17d91c278880f45f017e4272afe1f379b3
Add metrics for caching. Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
./src/libraries/System.Reflection.Metadata/tests/TestUtilities/TestMetadataStringDecoder.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Text; namespace System.Reflection.Metadata.Tests { public unsafe delegate string GetString(byte* bytes, int count); public sealed class TestMetadataStringDecoder : MetadataStringDecoder { private readonly GetString _getString; public TestMetadataStringDecoder(Encoding encoding, GetString getString) : base(encoding) { _getString = getString; } public override unsafe string GetString(byte* bytes, int byteCount) { return _getString(bytes, byteCount); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Text; namespace System.Reflection.Metadata.Tests { public unsafe delegate string GetString(byte* bytes, int count); public sealed class TestMetadataStringDecoder : MetadataStringDecoder { private readonly GetString _getString; public TestMetadataStringDecoder(Encoding encoding, GetString getString) : base(encoding) { _getString = getString; } public override unsafe string GetString(byte* bytes, int byteCount) { return _getString(bytes, byteCount); } } }
-1
dotnet/runtime
66,479
Add metrics for caching
Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
maryamariyan
2022-03-10T21:30:00Z
2022-04-08T18:16:17Z
51afa067be9f0d00dac2c455ceb78e38eef1e239
cef4ae17d91c278880f45f017e4272afe1f379b3
Add metrics for caching. Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
./src/libraries/System.Private.Xml/tests/Xslt/TestFiles/TestData/XsltApiV2/AddParameterEB7.xsl
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0"> <xsl:output method="xml" omit-xml-declaration="yes" /> <xsl:param name="param1" select="'default global'"/> <xsl:template match="/"> <xsl:call-template name="Test"> <xsl:with-param name="param1"/> </xsl:call-template> </xsl:template> <xsl:template name="Test"> <result><xsl:value-of select="$param1" /></result> </xsl:template> </xsl:stylesheet>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0"> <xsl:output method="xml" omit-xml-declaration="yes" /> <xsl:param name="param1" select="'default global'"/> <xsl:template match="/"> <xsl:call-template name="Test"> <xsl:with-param name="param1"/> </xsl:call-template> </xsl:template> <xsl:template name="Test"> <result><xsl:value-of select="$param1" /></result> </xsl:template> </xsl:stylesheet>
-1
dotnet/runtime
66,479
Add metrics for caching
Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
maryamariyan
2022-03-10T21:30:00Z
2022-04-08T18:16:17Z
51afa067be9f0d00dac2c455ceb78e38eef1e239
cef4ae17d91c278880f45f017e4272afe1f379b3
Add metrics for caching. Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
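The description above notes that a snapshot taken under heavy multithreaded load may be momentarily out of sync but becomes consistent once the load drains. A rough illustration of that caveat, with writer counts, keys, and sizes chosen arbitrarily for the sketch, might look like this:

```c#
using System;
using System.Linq;
using System.Threading.Tasks;
using Microsoft.Extensions.Caching.Memory;

var cache = new MemoryCache(new MemoryCacheOptions { TrackStatistics = true, SizeLimit = 10_000 });

// Hammer the cache from several threads.
Task[] writers = Enumerable.Range(0, 4).Select(t => Task.Run(() =>
{
    for (int i = 0; i < 1_000; i++)
    {
        cache.Set($"{t}-{i}", i, new MemoryCacheEntryOptions { Size = 1 });
    }
})).ToArray();

// A snapshot taken while the writers are still running may report a size that
// is momentarily out of step with the backing entry count.
Console.WriteLine(cache.GetCurrentStatistics()?.CurrentEstimatedSize);

Task.WaitAll(writers);

// Once the load has finished, the reported values settle.
MemoryCacheStatistics? settled = cache.GetCurrentStatistics();
Console.WriteLine($"entries={settled?.CurrentEntryCount} size={settled?.CurrentEstimatedSize}");
```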
./src/libraries/Microsoft.Win32.Registry/tests/RegistryKey/RegistryKey_GetValue_str_obj_b.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using Xunit; namespace Microsoft.Win32.RegistryTests { public class RegistryKey_GetValue_str_obj_b : RegistryTestsBase { [Fact] public void NegativeTests() { AssertExtensions.Throws<ArgumentException>("options", () => TestRegistryKey.GetValue(null, null, (RegistryValueOptions)(-1))); AssertExtensions.Throws<ArgumentException>("options", () => TestRegistryKey.GetValue(null, null, (RegistryValueOptions)2)); Assert.Throws<ObjectDisposedException>(() => { TestRegistryKey.Dispose(); TestRegistryKey.GetValue(null, TestData.DefaultValue, RegistryValueOptions.None); }); } [Fact] public void GetDefaultValue() { if (!TestRegistryKey.IsDefaultValueSet()) { Assert.Equal(TestData.DefaultValue, TestRegistryKey.GetValue(null, TestData.DefaultValue, RegistryValueOptions.DoNotExpandEnvironmentNames)); Assert.Equal(TestData.DefaultValue, TestRegistryKey.GetValue(string.Empty, TestData.DefaultValue, RegistryValueOptions.DoNotExpandEnvironmentNames)); } Assert.True(TestRegistryKey.SetDefaultValue(TestData.DefaultValue)); Assert.Equal(TestData.DefaultValue, TestRegistryKey.GetValue(null, null, RegistryValueOptions.DoNotExpandEnvironmentNames)); Assert.Equal(TestData.DefaultValue, TestRegistryKey.GetValue(string.Empty, null, RegistryValueOptions.DoNotExpandEnvironmentNames)); } [Fact] public void ShouldAcceptNullAsDefaultValue() { Assert.Null(TestRegistryKey.GetValue("tt", defaultValue: null, options: RegistryValueOptions.DoNotExpandEnvironmentNames)); } [Fact] public void GetStringValue() { // [] Pass name=Existing key, default value = null const string valueName = "MyTestKey"; const string expected = "This is a test string"; TestRegistryKey.SetValue(valueName, expected, RegistryValueKind.ExpandString); Assert.Equal(expected, TestRegistryKey.GetValue(valueName, null, RegistryValueOptions.DoNotExpandEnvironmentNames).ToString()); TestRegistryKey.DeleteValue(valueName); } public static IEnumerable<object[]> TestExpandableStrings { get { return TestData.TestExpandableStrings; } } [Theory] [MemberData(nameof(TestExpandableStrings))] public void GetExpandableStringValue(string testValue, string expectedValue, RegistryValueOptions getOptions) { const string valueName = "MyTestKey"; TestRegistryKey.SetValue(valueName, testValue, RegistryValueKind.ExpandString); Assert.Equal(expectedValue, TestRegistryKey.GetValue(valueName, null, getOptions).ToString()); TestRegistryKey.DeleteValue(valueName); } [Theory] [InlineData("RegistryKey_GetValue_str_obj_b_MyEnv")] [InlineData("RegistryKey_GetValue_str_obj_b_PathPath")] [InlineData("RegistryKey_GetValue_str_obj_b_Name")] [InlineData("RegistryKey_GetValue_str_obj_b_blah")] [InlineData("RegistryKey_GetValue_str_obj_b_TestKEyyyyyyyyyyyyyy")] public void GetValueWithNewlyCreatedEnvironmentVarables(string varName) { const string valueName = "MyTestKey"; string expectedValue = "%" + varName + "%" + @"\subdirectory\myfile.txt"; Helpers.SetEnvironmentVariable(varName, @"C:\UsedToBeCurrentDirectoryButAnythingWorks"); TestRegistryKey.SetValue(valueName, expectedValue, RegistryValueKind.ExpandString); Assert.Equal(expectedValue, TestRegistryKey.GetValue(valueName, string.Empty, RegistryValueOptions.DoNotExpandEnvironmentNames)); TestRegistryKey.DeleteValue(valueName); } public static IEnumerable<object[]> TestValueTypes { get { return TestData.TestValueTypes; } } [Theory] 
[MemberData(nameof(TestValueTypes))] public void GetValueWithValueTypes(string valueName, object testValue) { TestRegistryKey.SetValue(valueName, testValue, RegistryValueKind.ExpandString); Assert.Equal(testValue.ToString(), TestRegistryKey.GetValue(valueName, null, RegistryValueOptions.DoNotExpandEnvironmentNames).ToString()); TestRegistryKey.DeleteValue(valueName); } public static IEnumerable<object[]> TestEnvironment { get { return TestData.TestEnvironment; } } [Theory] [MemberData(nameof(TestEnvironment))] public void GetValueWithEnvironmentVariable(string valueName, string envVariableName, string expectedVariableValue) { _ = envVariableName; TestRegistryKey.SetValue(valueName, expectedVariableValue, RegistryValueKind.ExpandString); Assert.Equal(expectedVariableValue, TestRegistryKey.GetValue(valueName, null, RegistryValueOptions.DoNotExpandEnvironmentNames).ToString()); TestRegistryKey.DeleteValue(valueName); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using Xunit; namespace Microsoft.Win32.RegistryTests { public class RegistryKey_GetValue_str_obj_b : RegistryTestsBase { [Fact] public void NegativeTests() { AssertExtensions.Throws<ArgumentException>("options", () => TestRegistryKey.GetValue(null, null, (RegistryValueOptions)(-1))); AssertExtensions.Throws<ArgumentException>("options", () => TestRegistryKey.GetValue(null, null, (RegistryValueOptions)2)); Assert.Throws<ObjectDisposedException>(() => { TestRegistryKey.Dispose(); TestRegistryKey.GetValue(null, TestData.DefaultValue, RegistryValueOptions.None); }); } [Fact] public void GetDefaultValue() { if (!TestRegistryKey.IsDefaultValueSet()) { Assert.Equal(TestData.DefaultValue, TestRegistryKey.GetValue(null, TestData.DefaultValue, RegistryValueOptions.DoNotExpandEnvironmentNames)); Assert.Equal(TestData.DefaultValue, TestRegistryKey.GetValue(string.Empty, TestData.DefaultValue, RegistryValueOptions.DoNotExpandEnvironmentNames)); } Assert.True(TestRegistryKey.SetDefaultValue(TestData.DefaultValue)); Assert.Equal(TestData.DefaultValue, TestRegistryKey.GetValue(null, null, RegistryValueOptions.DoNotExpandEnvironmentNames)); Assert.Equal(TestData.DefaultValue, TestRegistryKey.GetValue(string.Empty, null, RegistryValueOptions.DoNotExpandEnvironmentNames)); } [Fact] public void ShouldAcceptNullAsDefaultValue() { Assert.Null(TestRegistryKey.GetValue("tt", defaultValue: null, options: RegistryValueOptions.DoNotExpandEnvironmentNames)); } [Fact] public void GetStringValue() { // [] Pass name=Existing key, default value = null const string valueName = "MyTestKey"; const string expected = "This is a test string"; TestRegistryKey.SetValue(valueName, expected, RegistryValueKind.ExpandString); Assert.Equal(expected, TestRegistryKey.GetValue(valueName, null, RegistryValueOptions.DoNotExpandEnvironmentNames).ToString()); TestRegistryKey.DeleteValue(valueName); } public static IEnumerable<object[]> TestExpandableStrings { get { return TestData.TestExpandableStrings; } } [Theory] [MemberData(nameof(TestExpandableStrings))] public void GetExpandableStringValue(string testValue, string expectedValue, RegistryValueOptions getOptions) { const string valueName = "MyTestKey"; TestRegistryKey.SetValue(valueName, testValue, RegistryValueKind.ExpandString); Assert.Equal(expectedValue, TestRegistryKey.GetValue(valueName, null, getOptions).ToString()); TestRegistryKey.DeleteValue(valueName); } [Theory] [InlineData("RegistryKey_GetValue_str_obj_b_MyEnv")] [InlineData("RegistryKey_GetValue_str_obj_b_PathPath")] [InlineData("RegistryKey_GetValue_str_obj_b_Name")] [InlineData("RegistryKey_GetValue_str_obj_b_blah")] [InlineData("RegistryKey_GetValue_str_obj_b_TestKEyyyyyyyyyyyyyy")] public void GetValueWithNewlyCreatedEnvironmentVarables(string varName) { const string valueName = "MyTestKey"; string expectedValue = "%" + varName + "%" + @"\subdirectory\myfile.txt"; Helpers.SetEnvironmentVariable(varName, @"C:\UsedToBeCurrentDirectoryButAnythingWorks"); TestRegistryKey.SetValue(valueName, expectedValue, RegistryValueKind.ExpandString); Assert.Equal(expectedValue, TestRegistryKey.GetValue(valueName, string.Empty, RegistryValueOptions.DoNotExpandEnvironmentNames)); TestRegistryKey.DeleteValue(valueName); } public static IEnumerable<object[]> TestValueTypes { get { return TestData.TestValueTypes; } } [Theory] 
[MemberData(nameof(TestValueTypes))] public void GetValueWithValueTypes(string valueName, object testValue) { TestRegistryKey.SetValue(valueName, testValue, RegistryValueKind.ExpandString); Assert.Equal(testValue.ToString(), TestRegistryKey.GetValue(valueName, null, RegistryValueOptions.DoNotExpandEnvironmentNames).ToString()); TestRegistryKey.DeleteValue(valueName); } public static IEnumerable<object[]> TestEnvironment { get { return TestData.TestEnvironment; } } [Theory] [MemberData(nameof(TestEnvironment))] public void GetValueWithEnvironmentVariable(string valueName, string envVariableName, string expectedVariableValue) { _ = envVariableName; TestRegistryKey.SetValue(valueName, expectedVariableValue, RegistryValueKind.ExpandString); Assert.Equal(expectedVariableValue, TestRegistryKey.GetValue(valueName, null, RegistryValueOptions.DoNotExpandEnvironmentNames).ToString()); TestRegistryKey.DeleteValue(valueName); } } }
-1
dotnet/runtime
66,479
Add metrics for caching
Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
maryamariyan
2022-03-10T21:30:00Z
2022-04-08T18:16:17Z
51afa067be9f0d00dac2c455ceb78e38eef1e239
cef4ae17d91c278880f45f017e4272afe1f379b3
Add metrics for caching. Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
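The "thread static" alternative mentioned in the description above is only sketched in prose; a hypothetical illustration of the idea follows. The `ThreadStats` type, the aggregator, and their members are invented here for illustration and are not part of the PR; the open problem the description calls out (counters from stale threads lingering) is visible in the snapshot loop.

```c#
using System.Collections.Concurrent;
using System.Threading;

// Hypothetical per-thread counters; each writer thread mutates only its own
// instance, so the hot path needs no interlocked operations.
internal sealed class ThreadStats
{
    public long Count;
    public long Size;
}

internal static class CacheStatsAggregator
{
    // Every ThreadStats ever created, so a snapshot can sum across threads.
    private static readonly ConcurrentBag<ThreadStats> s_allStats = new();

    [ThreadStatic]
    private static ThreadStats? t_stats;

    private static ThreadStats Current
    {
        get
        {
            if (t_stats is null)
            {
                t_stats = new ThreadStats();
                s_allStats.Add(t_stats);
            }
            return t_stats;
        }
    }

    public static void OnEntryAdded(long size)
    {
        Current.Count++;
        Current.Size += size;
    }

    public static void OnEntryRemoved(long size)
    {
        Current.Count--;
        Current.Size -= size;
    }

    // Snapshot = aggregate of every thread's counters. Stats from threads that
    // have gone stale remain in the bag, which is the cleanup cost the PR notes.
    public static (long Count, long Size) Snapshot()
    {
        long count = 0, size = 0;
        foreach (ThreadStats stats in s_allStats)
        {
            count += Volatile.Read(ref stats.Count);
            size += Volatile.Read(ref stats.Size);
        }
        return (count, size);
    }
}
```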
./src/libraries/System.Composition.TypedParts/src/System/Composition/Debugging/ContainerConfigurationDebuggerProxy.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Composition.Hosting; using System.Composition.Hosting.Core; using System.Composition.TypedParts; using System.Composition.TypedParts.Discovery; using System.Composition.TypedParts.Util; using System.Diagnostics; using System.Reflection; namespace System.Composition.Debugging { internal sealed class ContainerConfigurationDebuggerProxy { private readonly ContainerConfiguration _configuration; private DiscoveredPart[] _discoveredParts; private Type[] _ignoredTypes; public ContainerConfigurationDebuggerProxy(ContainerConfiguration configuration) { _configuration = configuration; } [DebuggerDisplay("Added Providers")] public ExportDescriptorProvider[] AddedExportDescriptorProviders { get { return _configuration.DebugGetAddedExportDescriptorProviders(); } } [DebuggerDisplay("Discovered Parts")] public DiscoveredPart[] DiscoveredParts { get { InitDiscovery(); return _discoveredParts; } } [DebuggerDisplay("Ignored Types")] public Type[] IgnoredTypes { get { InitDiscovery(); return _ignoredTypes; } } private void InitDiscovery() { if (_discoveredParts != null) return; var types = _configuration.DebugGetRegisteredTypes(); var defaultAttributeContext = _configuration.DebugGetDefaultAttributeContext() ?? new DirectAttributeContext(); var discovered = new List<DiscoveredPart>(); var ignored = new List<Type>(); foreach (var typeSet in types) { var ac = typeSet.Item2 ?? defaultAttributeContext; var activationFeatures = TypedPartExportDescriptorProvider.DebugGetActivationFeatures(ac); var inspector = new TypeInspector(ac, activationFeatures); foreach (var type in typeSet.Item1) { DiscoveredPart part; if (inspector.InspectTypeForPart(type.GetTypeInfo(), out part)) discovered.Add(part); else ignored.Add(type); } } _discoveredParts = discovered.ToArray(); _ignoredTypes = ignored.ToArray(); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Composition.Hosting; using System.Composition.Hosting.Core; using System.Composition.TypedParts; using System.Composition.TypedParts.Discovery; using System.Composition.TypedParts.Util; using System.Diagnostics; using System.Reflection; namespace System.Composition.Debugging { internal sealed class ContainerConfigurationDebuggerProxy { private readonly ContainerConfiguration _configuration; private DiscoveredPart[] _discoveredParts; private Type[] _ignoredTypes; public ContainerConfigurationDebuggerProxy(ContainerConfiguration configuration) { _configuration = configuration; } [DebuggerDisplay("Added Providers")] public ExportDescriptorProvider[] AddedExportDescriptorProviders { get { return _configuration.DebugGetAddedExportDescriptorProviders(); } } [DebuggerDisplay("Discovered Parts")] public DiscoveredPart[] DiscoveredParts { get { InitDiscovery(); return _discoveredParts; } } [DebuggerDisplay("Ignored Types")] public Type[] IgnoredTypes { get { InitDiscovery(); return _ignoredTypes; } } private void InitDiscovery() { if (_discoveredParts != null) return; var types = _configuration.DebugGetRegisteredTypes(); var defaultAttributeContext = _configuration.DebugGetDefaultAttributeContext() ?? new DirectAttributeContext(); var discovered = new List<DiscoveredPart>(); var ignored = new List<Type>(); foreach (var typeSet in types) { var ac = typeSet.Item2 ?? defaultAttributeContext; var activationFeatures = TypedPartExportDescriptorProvider.DebugGetActivationFeatures(ac); var inspector = new TypeInspector(ac, activationFeatures); foreach (var type in typeSet.Item1) { DiscoveredPart part; if (inspector.InspectTypeForPart(type.GetTypeInfo(), out part)) discovered.Add(part); else ignored.Add(type); } } _discoveredParts = discovered.ToArray(); _ignoredTypes = ignored.ToArray(); } } }
-1
dotnet/runtime
66,479
Add metrics for caching
Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
maryamariyan
2022-03-10T21:30:00Z
2022-04-08T18:16:17Z
51afa067be9f0d00dac2c455ceb78e38eef1e239
cef4ae17d91c278880f45f017e4272afe1f379b3
Add metrics for caching. Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
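Because `GetCurrentStatistics()` is added to `IMemoryCache` as a default interface member returning `null` (per the snippet above), callers that only hold an `IMemoryCache` should treat the statistics as optional. A small defensive pattern, assuming a .NET 7+ target and an injected `IMemoryCache`, with the class and log messages invented for this sketch:

```c#
using Microsoft.Extensions.Caching.Memory;
using Microsoft.Extensions.Logging;

public sealed class CacheHitRatioReporter
{
    private readonly IMemoryCache _cache;
    private readonly ILogger<CacheHitRatioReporter> _logger;

    public CacheHitRatioReporter(IMemoryCache cache, ILogger<CacheHitRatioReporter> logger)
    {
        _cache = cache;
        _logger = logger;
    }

    public void Report()
    {
        // Null means the implementation does not track statistics, either a
        // custom IMemoryCache that keeps the default or TrackStatistics left off.
        MemoryCacheStatistics? stats = _cache.GetCurrentStatistics();
        if (stats is null)
        {
            _logger.LogDebug("Cache statistics are not available.");
            return;
        }

        long lookups = stats.TotalHits + stats.TotalMisses;
        double hitRatio = lookups == 0 ? 0 : (double)stats.TotalHits / lookups;
        _logger.LogInformation("Cache hit ratio: {HitRatio:P1} over {Lookups} lookups.", hitRatio, lookups);
    }
}
```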
./src/libraries/System.Net.Mail/src/System/Net/Mail/SmtpConnection.Auth.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.Net.Mail { internal enum SupportedAuth { None = 0, Login = 1, NTLM = 2, GSSAPI = 4 }; internal sealed partial class SmtpConnection { private bool _serverSupportsEai; private bool _dsnEnabled; #pragma warning disable CS0414 // Field is not used in test project private bool _serverSupportsStartTls; #pragma warning restore CS0414 #if !NO_NTAUTHENTICATION private bool _sawNegotiate; #endif private SupportedAuth _supportedAuth = SupportedAuth.None; private readonly ISmtpAuthenticationModule[] _authenticationModules; // accounts for the '=' or ' ' character after AUTH private const int SizeOfAuthExtension = 4; private static readonly char[] s_authExtensionSplitters = new char[] { ' ', '=' }; private const string AuthExtension = "auth"; private const string AuthLogin = "login"; private const string AuthNtlm = "ntlm"; private const string AuthGssapi = "gssapi"; internal SmtpConnection(ISmtpAuthenticationModule[] authenticationModules) { _authenticationModules = authenticationModules; } internal bool DSNEnabled => _dsnEnabled; internal bool ServerSupportsEai => _serverSupportsEai; internal void ParseExtensions(string[] extensions) { _supportedAuth = SupportedAuth.None; foreach (string extension in extensions) { if (string.Compare(extension, 0, AuthExtension, 0, SizeOfAuthExtension, StringComparison.OrdinalIgnoreCase) == 0) { // remove the AUTH text including the following character // to ensure that split only gets the modules supported string[] authTypes = extension.Remove(0, SizeOfAuthExtension).Split(s_authExtensionSplitters, StringSplitOptions.RemoveEmptyEntries); foreach (string authType in authTypes) { if (string.Equals(authType, AuthLogin, StringComparison.OrdinalIgnoreCase)) { _supportedAuth |= SupportedAuth.Login; } else if (string.Equals(authType, AuthNtlm, StringComparison.OrdinalIgnoreCase)) { _supportedAuth |= SupportedAuth.NTLM; } else if (string.Equals(authType, AuthGssapi, StringComparison.OrdinalIgnoreCase)) { _supportedAuth |= SupportedAuth.GSSAPI; } } } else if (string.Compare(extension, 0, "dsn ", 0, 3, StringComparison.OrdinalIgnoreCase) == 0) { _dsnEnabled = true; } else if (string.Compare(extension, 0, "STARTTLS", 0, 8, StringComparison.OrdinalIgnoreCase) == 0) { _serverSupportsStartTls = true; } else if (string.Compare(extension, 0, "SMTPUTF8", 0, 8, StringComparison.OrdinalIgnoreCase) == 0) { _serverSupportsEai = true; } } } internal bool AuthSupported(ISmtpAuthenticationModule module) { if (module is SmtpLoginAuthenticationModule) { if ((_supportedAuth & SupportedAuth.Login) > 0) { return true; } } #if !NO_NTAUTHENTICATION else if (module is SmtpNegotiateAuthenticationModule) { if ((_supportedAuth & SupportedAuth.GSSAPI) > 0) { _sawNegotiate = true; return true; } } else if (module is SmtpNtlmAuthenticationModule) { // Don't try ntlm if negotiate has been tried if ((!_sawNegotiate && (_supportedAuth & SupportedAuth.NTLM) > 0)) { return true; } } #endif return false; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.Net.Mail { internal enum SupportedAuth { None = 0, Login = 1, NTLM = 2, GSSAPI = 4 }; internal sealed partial class SmtpConnection { private bool _serverSupportsEai; private bool _dsnEnabled; #pragma warning disable CS0414 // Field is not used in test project private bool _serverSupportsStartTls; #pragma warning restore CS0414 #if !NO_NTAUTHENTICATION private bool _sawNegotiate; #endif private SupportedAuth _supportedAuth = SupportedAuth.None; private readonly ISmtpAuthenticationModule[] _authenticationModules; // accounts for the '=' or ' ' character after AUTH private const int SizeOfAuthExtension = 4; private static readonly char[] s_authExtensionSplitters = new char[] { ' ', '=' }; private const string AuthExtension = "auth"; private const string AuthLogin = "login"; private const string AuthNtlm = "ntlm"; private const string AuthGssapi = "gssapi"; internal SmtpConnection(ISmtpAuthenticationModule[] authenticationModules) { _authenticationModules = authenticationModules; } internal bool DSNEnabled => _dsnEnabled; internal bool ServerSupportsEai => _serverSupportsEai; internal void ParseExtensions(string[] extensions) { _supportedAuth = SupportedAuth.None; foreach (string extension in extensions) { if (string.Compare(extension, 0, AuthExtension, 0, SizeOfAuthExtension, StringComparison.OrdinalIgnoreCase) == 0) { // remove the AUTH text including the following character // to ensure that split only gets the modules supported string[] authTypes = extension.Remove(0, SizeOfAuthExtension).Split(s_authExtensionSplitters, StringSplitOptions.RemoveEmptyEntries); foreach (string authType in authTypes) { if (string.Equals(authType, AuthLogin, StringComparison.OrdinalIgnoreCase)) { _supportedAuth |= SupportedAuth.Login; } else if (string.Equals(authType, AuthNtlm, StringComparison.OrdinalIgnoreCase)) { _supportedAuth |= SupportedAuth.NTLM; } else if (string.Equals(authType, AuthGssapi, StringComparison.OrdinalIgnoreCase)) { _supportedAuth |= SupportedAuth.GSSAPI; } } } else if (string.Compare(extension, 0, "dsn ", 0, 3, StringComparison.OrdinalIgnoreCase) == 0) { _dsnEnabled = true; } else if (string.Compare(extension, 0, "STARTTLS", 0, 8, StringComparison.OrdinalIgnoreCase) == 0) { _serverSupportsStartTls = true; } else if (string.Compare(extension, 0, "SMTPUTF8", 0, 8, StringComparison.OrdinalIgnoreCase) == 0) { _serverSupportsEai = true; } } } internal bool AuthSupported(ISmtpAuthenticationModule module) { if (module is SmtpLoginAuthenticationModule) { if ((_supportedAuth & SupportedAuth.Login) > 0) { return true; } } #if !NO_NTAUTHENTICATION else if (module is SmtpNegotiateAuthenticationModule) { if ((_supportedAuth & SupportedAuth.GSSAPI) > 0) { _sawNegotiate = true; return true; } } else if (module is SmtpNtlmAuthenticationModule) { // Don't try ntlm if negotiate has been tried if ((!_sawNegotiate && (_supportedAuth & SupportedAuth.NTLM) > 0)) { return true; } } #endif return false; } } }
-1
dotnet/runtime
66,479
Add metrics for caching
Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
maryamariyan
2022-03-10T21:30:00Z
2022-04-08T18:16:17Z
51afa067be9f0d00dac2c455ceb78e38eef1e239
cef4ae17d91c278880f45f017e4272afe1f379b3
Add metrics for caching. Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
./src/libraries/System.Reflection/tests/AssemblyVersion/Program_1_1_1_0.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.Reflection.Tests.AssemblyVersion { public class Program_1_1_1_0 { } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.Reflection.Tests.AssemblyVersion { public class Program_1_1_1_0 { } }
-1
dotnet/runtime
66,479
Add metrics for caching
Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
maryamariyan
2022-03-10T21:30:00Z
2022-04-08T18:16:17Z
51afa067be9f0d00dac2c455ceb78e38eef1e239
cef4ae17d91c278880f45f017e4272afe1f379b3
Add metrics for caching. Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
./src/libraries/System.IO.Ports/tests/SerialPort/ReadByte.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Diagnostics; using System.IO.PortsTests; using System.Text; using System.Threading; using System.Threading.Tasks; using Legacy.Support; using Xunit; namespace System.IO.Ports.Tests { public class ReadByte : PortsTest { //The number of random bytes to receive private const int numRndByte = 8; private enum ReadDataFromEnum { NonBuffered, Buffered, BufferedAndNonBuffered }; #region Test Cases [ConditionalFact(nameof(HasLoopbackOrNullModem))] public void ASCIIEncoding() { Debug.WriteLine("Verifying read with bytes encoded with ASCIIEncoding"); VerifyRead(new ASCIIEncoding()); } [ConditionalFact(nameof(HasLoopbackOrNullModem))] public void UTF8Encoding() { Debug.WriteLine("Verifying read with bytes encoded with UTF8Encoding"); VerifyRead(new UTF8Encoding()); } [ConditionalFact(nameof(HasLoopbackOrNullModem))] public void UTF32Encoding() { Debug.WriteLine("Verifying read with bytes encoded with UTF32Encoding"); VerifyRead(new UTF32Encoding()); } [ConditionalFact(nameof(HasLoopbackOrNullModem))] public void SerialPort_ReadBufferedData() { VerifyRead(Encoding.ASCII, ReadDataFromEnum.Buffered); } [ConditionalFact(nameof(HasLoopbackOrNullModem))] public void SerialPort_IterativeReadBufferedData() { VerifyRead(Encoding.ASCII, ReadDataFromEnum.Buffered); } [ConditionalFact(nameof(HasLoopbackOrNullModem))] public void SerialPort_ReadBufferedAndNonBufferedData() { VerifyRead(Encoding.ASCII, ReadDataFromEnum.BufferedAndNonBuffered); } [ConditionalFact(nameof(HasLoopbackOrNullModem))] public void SerialPort_IterativeReadBufferedAndNonBufferedData() { VerifyRead(Encoding.ASCII, ReadDataFromEnum.BufferedAndNonBuffered); } [ConditionalFact(nameof(HasLoopbackOrNullModem))] public void Read_DataReceivedBeforeTimeout() { using (SerialPort com1 = TCSupport.InitFirstSerialPort()) using (SerialPort com2 = TCSupport.InitSecondSerialPort(com1)) { byte[] byteXmitBuffer = TCSupport.GetRandomBytes(512); byte[] byteRcvBuffer = new byte[byteXmitBuffer.Length]; ASyncRead asyncRead = new ASyncRead(com1); var asyncReadTask = new Task(asyncRead.Read); Debug.WriteLine( "Verifying that ReadByte() will read bytes that have been received after the call to Read was made"); com1.Encoding = Encoding.UTF8; com2.Encoding = Encoding.UTF8; com1.ReadTimeout = 20000; // 20 seconds com1.Open(); if (!com2.IsOpen) //This is necessary since com1 and com2 might be the same port if we are using a loopback com2.Open(); asyncReadTask.Start(); asyncRead.ReadStartedEvent.WaitOne(); //This only tells us that the thread has started to execute code in the method Thread.Sleep(2000); //We need to wait to guarentee that we are executing code in SerialPort com2.Write(byteXmitBuffer, 0, byteXmitBuffer.Length); asyncRead.ReadCompletedEvent.WaitOne(); if (null != asyncRead.Exception) { Fail("Err_04448ajhied Unexpected exception thrown from async read:\n{0}", asyncRead.Exception); } else if (asyncRead.Result != byteXmitBuffer[0]) { Fail("Err_0158ahei Expected ReadChar to read {0}({0:X}) actual {1}({1:X})", byteXmitBuffer[0], asyncRead.Result); } else { Thread.Sleep(1000); //We need to wait for all of the bytes to be received byteRcvBuffer[0] = (byte)asyncRead.Result; int readResult = com1.Read(byteRcvBuffer, 1, byteRcvBuffer.Length - 1); if (1 + readResult != byteXmitBuffer.Length) { Fail("Err_051884ajoedo Expected Read to read {0} bytes actually read {1}", byteXmitBuffer.Length - 1, readResult); } else { for 
(int i = 0; i < byteXmitBuffer.Length; ++i) { if (byteRcvBuffer[i] != byteXmitBuffer[i]) { Fail( "Err_05188ahed Characters differ at {0} expected:{1}({1:X}) actual:{2}({2:X}) asyncRead.Result={3}", i, byteXmitBuffer[i], byteRcvBuffer[i], asyncRead.Result); } } } } TCSupport.WaitForTaskCompletion(asyncReadTask); } } #endregion #region Verification for Test Cases private void VerifyRead(Encoding encoding) { VerifyRead(encoding, ReadDataFromEnum.NonBuffered); } private void VerifyRead(Encoding encoding, ReadDataFromEnum readDataFrom) { using (SerialPort com1 = TCSupport.InitFirstSerialPort()) using (SerialPort com2 = TCSupport.InitSecondSerialPort(com1)) { Random rndGen = new Random(-55); int bufferSize = numRndByte; byte[] byteXmitBuffer = new byte[bufferSize]; //Genrate random bytes for (int i = 0; i < byteXmitBuffer.Length; i++) { byteXmitBuffer[i] = (byte)rndGen.Next(0, 256); } com1.ReadTimeout = 500; com1.Encoding = encoding; TCSupport.SetHighSpeed(com1, com2); com1.Open(); if (!com2.IsOpen) //This is necessary since com1 and com2 might be the same port if we are using a loopback com2.Open(); switch (readDataFrom) { case ReadDataFromEnum.NonBuffered: VerifyReadNonBuffered(com1, com2, byteXmitBuffer); break; case ReadDataFromEnum.Buffered: VerifyReadBuffered(com1, com2, byteXmitBuffer); break; case ReadDataFromEnum.BufferedAndNonBuffered: VerifyReadBufferedAndNonBuffered(com1, com2, byteXmitBuffer); break; default: throw new ArgumentOutOfRangeException(nameof(readDataFrom)); } } } private void VerifyReadNonBuffered(SerialPort com1, SerialPort com2, byte[] bytesToWrite) { VerifyBytesReadOnCom1FromCom2(com1, com2, bytesToWrite, bytesToWrite); } private void VerifyReadBuffered(SerialPort com1, SerialPort com2, byte[] bytesToWrite) { BufferData(com1, com2, bytesToWrite); PerformReadOnCom1FromCom2(com1, com2, bytesToWrite); } private void VerifyReadBufferedAndNonBuffered(SerialPort com1, SerialPort com2, byte[] bytesToWrite) { byte[] expectedBytes = new byte[(2 * bytesToWrite.Length)]; BufferData(com1, com2, bytesToWrite); Buffer.BlockCopy(bytesToWrite, 0, expectedBytes, 0, bytesToWrite.Length); Buffer.BlockCopy(bytesToWrite, 0, expectedBytes, bytesToWrite.Length, bytesToWrite.Length); VerifyBytesReadOnCom1FromCom2(com1, com2, bytesToWrite, expectedBytes); } private void BufferData(SerialPort com1, SerialPort com2, byte[] bytesToWrite) { com2.Write(bytesToWrite, 0, 1); // Write one byte at the beginning because we are going to read this to buffer the rest of the data com2.Write(bytesToWrite, 0, bytesToWrite.Length); TCSupport.WaitForReadBufferToLoad(com1, bytesToWrite.Length); com1.Read(new char[1], 0, 1); // This should put the rest of the bytes in SerialPorts own internal buffer if (com1.BytesToRead != bytesToWrite.Length) { Fail("Err_7083zaz Expected com1.BytesToRead={0} actual={1}", bytesToWrite.Length, com1.BytesToRead); } } private void VerifyBytesReadOnCom1FromCom2(SerialPort com1, SerialPort com2, byte[] bytesToWrite, byte[] expectedBytes) { com2.Write(bytesToWrite, 0, bytesToWrite.Length); com1.ReadTimeout = 500; Thread.Sleep((int)(((bytesToWrite.Length * 10.0) / com1.BaudRate) * 1000) + 250); PerformReadOnCom1FromCom2(com1, com2, expectedBytes); } private void PerformReadOnCom1FromCom2(SerialPort com1, SerialPort com2, byte[] expectedBytes) { byte[] byteRcvBuffer = new byte[expectedBytes.Length]; int readInt; int i; i = 0; while (true) { try { readInt = com1.ReadByte(); } catch (TimeoutException) { break; } //While their are more bytes to be read if (expectedBytes.Length <= i) { //If 
we have read in more bytes then were actually sent Fail("ERROR!!!: We have received more bytes then were sent"); break; } byteRcvBuffer[i] = (byte)readInt; if (readInt != expectedBytes[i]) { //If the byte read is not the expected byte Fail("ERROR!!!: Expected to read {0} actual read byte {1}", (int)expectedBytes[i], readInt); } i++; if (expectedBytes.Length - i != com1.BytesToRead) { Fail("ERROR!!!: Expected BytesToRead={0} actual={1}", expectedBytes.Length - i, com1.BytesToRead); } } if (0 != com1.BytesToRead) { Fail("ERROR!!!: Expected BytesToRead=0 actual BytesToRead={0}", com1.BytesToRead); } if (com1.IsOpen) com1.Close(); if (com2.IsOpen) com2.Close(); } public class ASyncRead { private readonly SerialPort _com; private int _result; private readonly AutoResetEvent _readCompletedEvent; private readonly AutoResetEvent _readStartedEvent; private Exception _exception; public ASyncRead(SerialPort com) { _com = com; _result = int.MinValue; _readCompletedEvent = new AutoResetEvent(false); _readStartedEvent = new AutoResetEvent(false); _exception = null; } public void Read() { try { _readStartedEvent.Set(); _result = _com.ReadByte(); } catch (Exception e) { _exception = e; } finally { _readCompletedEvent.Set(); } } public AutoResetEvent ReadStartedEvent { get { return _readStartedEvent; } } public AutoResetEvent ReadCompletedEvent { get { return _readCompletedEvent; } } public int Result { get { return _result; } } public Exception Exception { get { return _exception; } } } #endregion } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Diagnostics; using System.IO.PortsTests; using System.Text; using System.Threading; using System.Threading.Tasks; using Legacy.Support; using Xunit; namespace System.IO.Ports.Tests { public class ReadByte : PortsTest { //The number of random bytes to receive private const int numRndByte = 8; private enum ReadDataFromEnum { NonBuffered, Buffered, BufferedAndNonBuffered }; #region Test Cases [ConditionalFact(nameof(HasLoopbackOrNullModem))] public void ASCIIEncoding() { Debug.WriteLine("Verifying read with bytes encoded with ASCIIEncoding"); VerifyRead(new ASCIIEncoding()); } [ConditionalFact(nameof(HasLoopbackOrNullModem))] public void UTF8Encoding() { Debug.WriteLine("Verifying read with bytes encoded with UTF8Encoding"); VerifyRead(new UTF8Encoding()); } [ConditionalFact(nameof(HasLoopbackOrNullModem))] public void UTF32Encoding() { Debug.WriteLine("Verifying read with bytes encoded with UTF32Encoding"); VerifyRead(new UTF32Encoding()); } [ConditionalFact(nameof(HasLoopbackOrNullModem))] public void SerialPort_ReadBufferedData() { VerifyRead(Encoding.ASCII, ReadDataFromEnum.Buffered); } [ConditionalFact(nameof(HasLoopbackOrNullModem))] public void SerialPort_IterativeReadBufferedData() { VerifyRead(Encoding.ASCII, ReadDataFromEnum.Buffered); } [ConditionalFact(nameof(HasLoopbackOrNullModem))] public void SerialPort_ReadBufferedAndNonBufferedData() { VerifyRead(Encoding.ASCII, ReadDataFromEnum.BufferedAndNonBuffered); } [ConditionalFact(nameof(HasLoopbackOrNullModem))] public void SerialPort_IterativeReadBufferedAndNonBufferedData() { VerifyRead(Encoding.ASCII, ReadDataFromEnum.BufferedAndNonBuffered); } [ConditionalFact(nameof(HasLoopbackOrNullModem))] public void Read_DataReceivedBeforeTimeout() { using (SerialPort com1 = TCSupport.InitFirstSerialPort()) using (SerialPort com2 = TCSupport.InitSecondSerialPort(com1)) { byte[] byteXmitBuffer = TCSupport.GetRandomBytes(512); byte[] byteRcvBuffer = new byte[byteXmitBuffer.Length]; ASyncRead asyncRead = new ASyncRead(com1); var asyncReadTask = new Task(asyncRead.Read); Debug.WriteLine( "Verifying that ReadByte() will read bytes that have been received after the call to Read was made"); com1.Encoding = Encoding.UTF8; com2.Encoding = Encoding.UTF8; com1.ReadTimeout = 20000; // 20 seconds com1.Open(); if (!com2.IsOpen) //This is necessary since com1 and com2 might be the same port if we are using a loopback com2.Open(); asyncReadTask.Start(); asyncRead.ReadStartedEvent.WaitOne(); //This only tells us that the thread has started to execute code in the method Thread.Sleep(2000); //We need to wait to guarentee that we are executing code in SerialPort com2.Write(byteXmitBuffer, 0, byteXmitBuffer.Length); asyncRead.ReadCompletedEvent.WaitOne(); if (null != asyncRead.Exception) { Fail("Err_04448ajhied Unexpected exception thrown from async read:\n{0}", asyncRead.Exception); } else if (asyncRead.Result != byteXmitBuffer[0]) { Fail("Err_0158ahei Expected ReadChar to read {0}({0:X}) actual {1}({1:X})", byteXmitBuffer[0], asyncRead.Result); } else { Thread.Sleep(1000); //We need to wait for all of the bytes to be received byteRcvBuffer[0] = (byte)asyncRead.Result; int readResult = com1.Read(byteRcvBuffer, 1, byteRcvBuffer.Length - 1); if (1 + readResult != byteXmitBuffer.Length) { Fail("Err_051884ajoedo Expected Read to read {0} bytes actually read {1}", byteXmitBuffer.Length - 1, readResult); } else { for 
(int i = 0; i < byteXmitBuffer.Length; ++i) { if (byteRcvBuffer[i] != byteXmitBuffer[i]) { Fail( "Err_05188ahed Characters differ at {0} expected:{1}({1:X}) actual:{2}({2:X}) asyncRead.Result={3}", i, byteXmitBuffer[i], byteRcvBuffer[i], asyncRead.Result); } } } } TCSupport.WaitForTaskCompletion(asyncReadTask); } } #endregion #region Verification for Test Cases private void VerifyRead(Encoding encoding) { VerifyRead(encoding, ReadDataFromEnum.NonBuffered); } private void VerifyRead(Encoding encoding, ReadDataFromEnum readDataFrom) { using (SerialPort com1 = TCSupport.InitFirstSerialPort()) using (SerialPort com2 = TCSupport.InitSecondSerialPort(com1)) { Random rndGen = new Random(-55); int bufferSize = numRndByte; byte[] byteXmitBuffer = new byte[bufferSize]; //Genrate random bytes for (int i = 0; i < byteXmitBuffer.Length; i++) { byteXmitBuffer[i] = (byte)rndGen.Next(0, 256); } com1.ReadTimeout = 500; com1.Encoding = encoding; TCSupport.SetHighSpeed(com1, com2); com1.Open(); if (!com2.IsOpen) //This is necessary since com1 and com2 might be the same port if we are using a loopback com2.Open(); switch (readDataFrom) { case ReadDataFromEnum.NonBuffered: VerifyReadNonBuffered(com1, com2, byteXmitBuffer); break; case ReadDataFromEnum.Buffered: VerifyReadBuffered(com1, com2, byteXmitBuffer); break; case ReadDataFromEnum.BufferedAndNonBuffered: VerifyReadBufferedAndNonBuffered(com1, com2, byteXmitBuffer); break; default: throw new ArgumentOutOfRangeException(nameof(readDataFrom)); } } } private void VerifyReadNonBuffered(SerialPort com1, SerialPort com2, byte[] bytesToWrite) { VerifyBytesReadOnCom1FromCom2(com1, com2, bytesToWrite, bytesToWrite); } private void VerifyReadBuffered(SerialPort com1, SerialPort com2, byte[] bytesToWrite) { BufferData(com1, com2, bytesToWrite); PerformReadOnCom1FromCom2(com1, com2, bytesToWrite); } private void VerifyReadBufferedAndNonBuffered(SerialPort com1, SerialPort com2, byte[] bytesToWrite) { byte[] expectedBytes = new byte[(2 * bytesToWrite.Length)]; BufferData(com1, com2, bytesToWrite); Buffer.BlockCopy(bytesToWrite, 0, expectedBytes, 0, bytesToWrite.Length); Buffer.BlockCopy(bytesToWrite, 0, expectedBytes, bytesToWrite.Length, bytesToWrite.Length); VerifyBytesReadOnCom1FromCom2(com1, com2, bytesToWrite, expectedBytes); } private void BufferData(SerialPort com1, SerialPort com2, byte[] bytesToWrite) { com2.Write(bytesToWrite, 0, 1); // Write one byte at the beginning because we are going to read this to buffer the rest of the data com2.Write(bytesToWrite, 0, bytesToWrite.Length); TCSupport.WaitForReadBufferToLoad(com1, bytesToWrite.Length); com1.Read(new char[1], 0, 1); // This should put the rest of the bytes in SerialPorts own internal buffer if (com1.BytesToRead != bytesToWrite.Length) { Fail("Err_7083zaz Expected com1.BytesToRead={0} actual={1}", bytesToWrite.Length, com1.BytesToRead); } } private void VerifyBytesReadOnCom1FromCom2(SerialPort com1, SerialPort com2, byte[] bytesToWrite, byte[] expectedBytes) { com2.Write(bytesToWrite, 0, bytesToWrite.Length); com1.ReadTimeout = 500; Thread.Sleep((int)(((bytesToWrite.Length * 10.0) / com1.BaudRate) * 1000) + 250); PerformReadOnCom1FromCom2(com1, com2, expectedBytes); } private void PerformReadOnCom1FromCom2(SerialPort com1, SerialPort com2, byte[] expectedBytes) { byte[] byteRcvBuffer = new byte[expectedBytes.Length]; int readInt; int i; i = 0; while (true) { try { readInt = com1.ReadByte(); } catch (TimeoutException) { break; } //While their are more bytes to be read if (expectedBytes.Length <= i) { //If 
we have read in more bytes then were actually sent Fail("ERROR!!!: We have received more bytes then were sent"); break; } byteRcvBuffer[i] = (byte)readInt; if (readInt != expectedBytes[i]) { //If the byte read is not the expected byte Fail("ERROR!!!: Expected to read {0} actual read byte {1}", (int)expectedBytes[i], readInt); } i++; if (expectedBytes.Length - i != com1.BytesToRead) { Fail("ERROR!!!: Expected BytesToRead={0} actual={1}", expectedBytes.Length - i, com1.BytesToRead); } } if (0 != com1.BytesToRead) { Fail("ERROR!!!: Expected BytesToRead=0 actual BytesToRead={0}", com1.BytesToRead); } if (com1.IsOpen) com1.Close(); if (com2.IsOpen) com2.Close(); } public class ASyncRead { private readonly SerialPort _com; private int _result; private readonly AutoResetEvent _readCompletedEvent; private readonly AutoResetEvent _readStartedEvent; private Exception _exception; public ASyncRead(SerialPort com) { _com = com; _result = int.MinValue; _readCompletedEvent = new AutoResetEvent(false); _readStartedEvent = new AutoResetEvent(false); _exception = null; } public void Read() { try { _readStartedEvent.Set(); _result = _com.ReadByte(); } catch (Exception e) { _exception = e; } finally { _readCompletedEvent.Set(); } } public AutoResetEvent ReadStartedEvent { get { return _readStartedEvent; } } public AutoResetEvent ReadCompletedEvent { get { return _readCompletedEvent; } } public int Result { get { return _result; } } public Exception Exception { get { return _exception; } } } #endregion } }
-1
dotnet/runtime
66,479
Add metrics for caching
Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
maryamariyan
2022-03-10T21:30:00Z
2022-04-08T18:16:17Z
51afa067be9f0d00dac2c455ceb78e38eef1e239
cef4ae17d91c278880f45f017e4272afe1f379b3
./src/libraries/System.Composition.AttributedModel/src/System.Composition.AttributedModel.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <TargetFrameworks>$(NetCoreAppCurrent);$(NetCoreAppMinimum);netstandard2.0;$(NetFrameworkMinimum)</TargetFrameworks> <IsPackable>true</IsPackable> <StrongNameKeyId>Microsoft</StrongNameKeyId> <PackageDescription>Provides common attributes used by System.Composition types. Commonly Used Types: System.Composition.ExportAttribute System.Composition.ImportAttribute System.Composition.Convention.AttributedModelProvider</PackageDescription> </PropertyGroup> <ItemGroup> <Compile Include="System\Composition\Convention\AttributedModelProvider.cs" /> <Compile Include="System\Composition\ExportAttribute.cs" /> <Compile Include="System\Composition\ExportMetadataAttribute.cs" /> <Compile Include="System\Composition\ImportAttribute.cs" /> <Compile Include="System\Composition\ImportingConstructorAttribute.cs" /> <Compile Include="System\Composition\ImportManyAttribute.cs" /> <Compile Include="System\Composition\ImportMetadataConstraintAttribute.cs" /> <Compile Include="System\Composition\MetadataAttributeAttribute.cs" /> <Compile Include="System\Composition\OnImportsSatisfiedAttribute.cs" /> <Compile Include="System\Composition\PartMetadataAttribute.cs" /> <Compile Include="System\Composition\PartNotDiscoverableAttribute.cs" /> <Compile Include="System\Composition\SharedAttribute.cs" /> <Compile Include="System\Composition\SharingBoundaryAttribute.cs" /> </ItemGroup> <ItemGroup Condition="'$(TargetFrameworkIdentifier)' == '.NETCoreApp'"> <Reference Include="System.Runtime" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <TargetFrameworks>$(NetCoreAppCurrent);$(NetCoreAppMinimum);netstandard2.0;$(NetFrameworkMinimum)</TargetFrameworks> <IsPackable>true</IsPackable> <StrongNameKeyId>Microsoft</StrongNameKeyId> <PackageDescription>Provides common attributes used by System.Composition types. Commonly Used Types: System.Composition.ExportAttribute System.Composition.ImportAttribute System.Composition.Convention.AttributedModelProvider</PackageDescription> </PropertyGroup> <ItemGroup> <Compile Include="System\Composition\Convention\AttributedModelProvider.cs" /> <Compile Include="System\Composition\ExportAttribute.cs" /> <Compile Include="System\Composition\ExportMetadataAttribute.cs" /> <Compile Include="System\Composition\ImportAttribute.cs" /> <Compile Include="System\Composition\ImportingConstructorAttribute.cs" /> <Compile Include="System\Composition\ImportManyAttribute.cs" /> <Compile Include="System\Composition\ImportMetadataConstraintAttribute.cs" /> <Compile Include="System\Composition\MetadataAttributeAttribute.cs" /> <Compile Include="System\Composition\OnImportsSatisfiedAttribute.cs" /> <Compile Include="System\Composition\PartMetadataAttribute.cs" /> <Compile Include="System\Composition\PartNotDiscoverableAttribute.cs" /> <Compile Include="System\Composition\SharedAttribute.cs" /> <Compile Include="System\Composition\SharingBoundaryAttribute.cs" /> </ItemGroup> <ItemGroup Condition="'$(TargetFrameworkIdentifier)' == '.NETCoreApp'"> <Reference Include="System.Runtime" /> </ItemGroup> </Project>
-1
./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd/Store.Vector128.Byte.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Reflection; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void Store_Vector128_Byte() { var test = new StoreUnaryOpTest__Store_Vector128_Byte(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class StoreUnaryOpTest__Store_Vector128_Byte { private struct DataTable { private byte[] inArray1; private byte[] outArray; private GCHandle inHandle1; private GCHandle outHandle; private ulong alignment; public DataTable(Byte[] inArray1, Byte[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Byte>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Byte>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.outArray = new byte[alignment 
* 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Byte, byte>(ref inArray1[0]), (uint)sizeOfinArray1); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<Byte> _fld1; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Byte>, byte>(ref testStruct._fld1), ref Unsafe.As<Byte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Byte>>()); return testStruct; } public void RunStructFldScenario(StoreUnaryOpTest__Store_Vector128_Byte testClass) { AdvSimd.Store((Byte*)testClass._dataTable.outArrayPtr, _fld1); testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(StoreUnaryOpTest__Store_Vector128_Byte testClass) { fixed (Vector128<Byte>* pFld1 = &_fld1) { AdvSimd.Store((Byte*)testClass._dataTable.outArrayPtr, AdvSimd.LoadVector128((Byte*)(pFld1))); testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Byte>>() / sizeof(Byte); private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Byte>>() / sizeof(Byte); private static Byte[] _data1 = new Byte[Op1ElementCount]; private static Vector128<Byte> _clsVar1; private Vector128<Byte> _fld1; private DataTable _dataTable; static StoreUnaryOpTest__Store_Vector128_Byte() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Byte>, byte>(ref _clsVar1), ref Unsafe.As<Byte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Byte>>()); } public StoreUnaryOpTest__Store_Vector128_Byte() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Byte>, byte>(ref _fld1), ref Unsafe.As<Byte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Byte>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); } _dataTable = new DataTable(_data1, new Byte[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); AdvSimd.Store((Byte*)_dataTable.outArrayPtr, Unsafe.Read<Vector128<Byte>>(_dataTable.inArray1Ptr)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); AdvSimd.Store((Byte*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Byte*)(_dataTable.inArray1Ptr))); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void 
RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); typeof(AdvSimd).GetMethod(nameof(AdvSimd.Store), new Type[] { typeof(Byte*), typeof(Vector128<Byte>) }) .Invoke(null, new object[] { Pointer.Box(_dataTable.outArrayPtr, typeof(Byte*)), Unsafe.Read<Vector128<Byte>>(_dataTable.inArray1Ptr) }); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); typeof(AdvSimd).GetMethod(nameof(AdvSimd.Store), new Type[] { typeof(Byte*), typeof(Vector128<Byte>) }) .Invoke(null, new object[] { Pointer.Box(_dataTable.outArrayPtr, typeof(Byte*)), AdvSimd.LoadVector128((Byte*)(_dataTable.inArray1Ptr)) }); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); AdvSimd.Store((Byte*)_dataTable.outArrayPtr, _clsVar1); ValidateResult(_clsVar1, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector128<Byte>* pClsVar1 = &_clsVar1) { AdvSimd.Store((Byte*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Byte*)(pClsVar1))); ValidateResult(_clsVar1, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<Byte>>(_dataTable.inArray1Ptr); AdvSimd.Store((Byte*)_dataTable.outArrayPtr, op1); ValidateResult(op1, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector128((Byte*)(_dataTable.inArray1Ptr)); AdvSimd.Store((Byte*)_dataTable.outArrayPtr, op1); ValidateResult(op1, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new StoreUnaryOpTest__Store_Vector128_Byte(); AdvSimd.Store((Byte*)_dataTable.outArrayPtr, test._fld1); ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new StoreUnaryOpTest__Store_Vector128_Byte(); fixed (Vector128<Byte>* pFld1 = &test._fld1) { AdvSimd.Store((Byte*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Byte*)(pFld1))); ValidateResult(test._fld1, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); AdvSimd.Store((Byte*)_dataTable.outArrayPtr, _fld1); ValidateResult(_fld1, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector128<Byte>* pFld1 = &_fld1) { AdvSimd.Store((Byte*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Byte*)(pFld1))); ValidateResult(_fld1, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); AdvSimd.Store((Byte*)_dataTable.outArrayPtr, test._fld1); ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); AdvSimd.Store((Byte*)_dataTable.outArrayPtr, 
AdvSimd.LoadVector128((Byte*)(&test._fld1))); ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector128<Byte> op1, void* result, [CallerMemberName] string method = "") { Byte[] inArray1 = new Byte[Op1ElementCount]; Byte[] outArray = new Byte[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Byte, byte>(ref inArray1[0]), op1); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Byte>>()); ValidateResult(inArray1, outArray, method); } private void ValidateResult(void* op1, void* result, [CallerMemberName] string method = "") { Byte[] inArray1 = new Byte[Op1ElementCount]; Byte[] outArray = new Byte[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<Byte>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Byte>>()); ValidateResult(inArray1, outArray, method); } private void ValidateResult(Byte[] firstOp, Byte[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (int i = 0; i < RetElementCount; i++) { if (firstOp[i] != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.Store)}<Byte>(Vector128<Byte>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Reflection; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void Store_Vector128_Byte() { var test = new StoreUnaryOpTest__Store_Vector128_Byte(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class StoreUnaryOpTest__Store_Vector128_Byte { private struct DataTable { private byte[] inArray1; private byte[] outArray; private GCHandle inHandle1; private GCHandle outHandle; private ulong alignment; public DataTable(Byte[] inArray1, Byte[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Byte>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Byte>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.outArray = new byte[alignment 
* 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Byte, byte>(ref inArray1[0]), (uint)sizeOfinArray1); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<Byte> _fld1; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Byte>, byte>(ref testStruct._fld1), ref Unsafe.As<Byte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Byte>>()); return testStruct; } public void RunStructFldScenario(StoreUnaryOpTest__Store_Vector128_Byte testClass) { AdvSimd.Store((Byte*)testClass._dataTable.outArrayPtr, _fld1); testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(StoreUnaryOpTest__Store_Vector128_Byte testClass) { fixed (Vector128<Byte>* pFld1 = &_fld1) { AdvSimd.Store((Byte*)testClass._dataTable.outArrayPtr, AdvSimd.LoadVector128((Byte*)(pFld1))); testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Byte>>() / sizeof(Byte); private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Byte>>() / sizeof(Byte); private static Byte[] _data1 = new Byte[Op1ElementCount]; private static Vector128<Byte> _clsVar1; private Vector128<Byte> _fld1; private DataTable _dataTable; static StoreUnaryOpTest__Store_Vector128_Byte() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Byte>, byte>(ref _clsVar1), ref Unsafe.As<Byte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Byte>>()); } public StoreUnaryOpTest__Store_Vector128_Byte() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Byte>, byte>(ref _fld1), ref Unsafe.As<Byte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Byte>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); } _dataTable = new DataTable(_data1, new Byte[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); AdvSimd.Store((Byte*)_dataTable.outArrayPtr, Unsafe.Read<Vector128<Byte>>(_dataTable.inArray1Ptr)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); AdvSimd.Store((Byte*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Byte*)(_dataTable.inArray1Ptr))); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void 
RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); typeof(AdvSimd).GetMethod(nameof(AdvSimd.Store), new Type[] { typeof(Byte*), typeof(Vector128<Byte>) }) .Invoke(null, new object[] { Pointer.Box(_dataTable.outArrayPtr, typeof(Byte*)), Unsafe.Read<Vector128<Byte>>(_dataTable.inArray1Ptr) }); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); typeof(AdvSimd).GetMethod(nameof(AdvSimd.Store), new Type[] { typeof(Byte*), typeof(Vector128<Byte>) }) .Invoke(null, new object[] { Pointer.Box(_dataTable.outArrayPtr, typeof(Byte*)), AdvSimd.LoadVector128((Byte*)(_dataTable.inArray1Ptr)) }); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); AdvSimd.Store((Byte*)_dataTable.outArrayPtr, _clsVar1); ValidateResult(_clsVar1, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector128<Byte>* pClsVar1 = &_clsVar1) { AdvSimd.Store((Byte*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Byte*)(pClsVar1))); ValidateResult(_clsVar1, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<Byte>>(_dataTable.inArray1Ptr); AdvSimd.Store((Byte*)_dataTable.outArrayPtr, op1); ValidateResult(op1, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector128((Byte*)(_dataTable.inArray1Ptr)); AdvSimd.Store((Byte*)_dataTable.outArrayPtr, op1); ValidateResult(op1, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new StoreUnaryOpTest__Store_Vector128_Byte(); AdvSimd.Store((Byte*)_dataTable.outArrayPtr, test._fld1); ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new StoreUnaryOpTest__Store_Vector128_Byte(); fixed (Vector128<Byte>* pFld1 = &test._fld1) { AdvSimd.Store((Byte*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Byte*)(pFld1))); ValidateResult(test._fld1, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); AdvSimd.Store((Byte*)_dataTable.outArrayPtr, _fld1); ValidateResult(_fld1, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector128<Byte>* pFld1 = &_fld1) { AdvSimd.Store((Byte*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Byte*)(pFld1))); ValidateResult(_fld1, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); AdvSimd.Store((Byte*)_dataTable.outArrayPtr, test._fld1); ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); AdvSimd.Store((Byte*)_dataTable.outArrayPtr, 
AdvSimd.LoadVector128((Byte*)(&test._fld1))); ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector128<Byte> op1, void* result, [CallerMemberName] string method = "") { Byte[] inArray1 = new Byte[Op1ElementCount]; Byte[] outArray = new Byte[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Byte, byte>(ref inArray1[0]), op1); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Byte>>()); ValidateResult(inArray1, outArray, method); } private void ValidateResult(void* op1, void* result, [CallerMemberName] string method = "") { Byte[] inArray1 = new Byte[Op1ElementCount]; Byte[] outArray = new Byte[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<Byte>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Byte>>()); ValidateResult(inArray1, outArray, method); } private void ValidateResult(Byte[] firstOp, Byte[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (int i = 0; i < RetElementCount; i++) { if (firstOp[i] != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.Store)}<Byte>(Vector128<Byte>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
./src/libraries/Microsoft.VisualBasic.Core/tests/Microsoft/VisualBasic/CompilerServices/DesignerGeneratedAttributeTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using Xunit; namespace Microsoft.VisualBasic.CompilerServices.Tests { public class DesignerGeneratedAttributeTests { [Fact] public void Ctor_Empty_Success() { new DesignerGeneratedAttribute(); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using Xunit; namespace Microsoft.VisualBasic.CompilerServices.Tests { public class DesignerGeneratedAttributeTests { [Fact] public void Ctor_Empty_Success() { new DesignerGeneratedAttribute(); } } }
-1
./src/tests/JIT/Directed/shift/uint32_ro.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType /> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="uint32.cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType /> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="uint32.cs" /> </ItemGroup> </Project>
-1
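For readers skimming this record, the PR description above defines a statistics API on MemoryCache. The sketch below is a minimal, hedged example of how that API could be consumed, using only the members the description lists (TrackStatistics, GetCurrentStatistics, and the MemoryCacheStatistics properties); the setup code and cache keys are illustrative, not taken from the PR.

```c#
using System;
using Microsoft.Extensions.Caching.Memory;

class StatisticsDemo
{
    static void Main()
    {
        // Statistics tracking is opt-in via the new option described in the PR.
        var cache = new MemoryCache(new MemoryCacheOptions { TrackStatistics = true });

        cache.Set("a", 1);
        cache.TryGetValue("a", out int _);   // expected hit
        cache.TryGetValue("b", out int _);   // expected miss

        // Per the description, the snapshot may lag slightly under heavy concurrent load.
        MemoryCacheStatistics? stats = cache.GetCurrentStatistics();
        if (stats is not null)
        {
            Console.WriteLine(
                $"Hits={stats.TotalHits}, Misses={stats.TotalMisses}, Entries={stats.CurrentEntryCount}");
        }
    }
}
```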
dotnet/runtime
66,479
Add metrics for caching
Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
maryamariyan
2022-03-10T21:30:00Z
2022-04-08T18:16:17Z
51afa067be9f0d00dac2c455ceb78e38eef1e239
cef4ae17d91c278880f45f017e4272afe1f379b3
Add metrics for caching. Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
./src/libraries/System.Security.Cryptography/src/System/Security/Cryptography/OidLookup.OpenSsl.cs
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System.Diagnostics;
using System.Runtime.InteropServices;

namespace System.Security.Cryptography
{
    internal static partial class OidLookup
    {
        private static bool ShouldUseCache(OidGroup oidGroup)
        {
            return true;
        }

        private static string? NativeOidToFriendlyName(string oid, OidGroup oidGroup, bool fallBackToAllGroups)
        {
            IntPtr friendlyNamePtr = IntPtr.Zero;
            int result = Interop.Crypto.LookupFriendlyNameByOid(oid, ref friendlyNamePtr);

            switch (result)
            {
                case 1: /* Success */
                    Debug.Assert(friendlyNamePtr != IntPtr.Zero, "friendlyNamePtr != IntPtr.Zero");

                    // The pointer is to a shared string, so marshalling it out is all that's required.
                    return Marshal.PtrToStringAnsi(friendlyNamePtr);
                case -1: /* OpenSSL internal error */
                    throw Interop.Crypto.CreateOpenSslCryptographicException();
                default:
                    Debug.Assert(result == 0, $"LookupFriendlyNameByOid returned unexpected result {result}");

                    // The lookup may have left errors in this case, clean up for precaution.
                    Interop.Crypto.ErrClearError();
                    return null;
            }
        }

        private static string? NativeFriendlyNameToOid(string friendlyName, OidGroup oidGroup, bool fallBackToAllGroups)
        {
            IntPtr sharedObject = Interop.Crypto.GetObjectDefinitionByName(friendlyName);

            if (sharedObject == IntPtr.Zero)
            {
                return null;
            }

            return Interop.Crypto.GetOidValue(sharedObject);
        }
    }
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System.Diagnostics;
using System.Runtime.InteropServices;

namespace System.Security.Cryptography
{
    internal static partial class OidLookup
    {
        private static bool ShouldUseCache(OidGroup oidGroup)
        {
            return true;
        }

        private static string? NativeOidToFriendlyName(string oid, OidGroup oidGroup, bool fallBackToAllGroups)
        {
            IntPtr friendlyNamePtr = IntPtr.Zero;
            int result = Interop.Crypto.LookupFriendlyNameByOid(oid, ref friendlyNamePtr);

            switch (result)
            {
                case 1: /* Success */
                    Debug.Assert(friendlyNamePtr != IntPtr.Zero, "friendlyNamePtr != IntPtr.Zero");

                    // The pointer is to a shared string, so marshalling it out is all that's required.
                    return Marshal.PtrToStringAnsi(friendlyNamePtr);
                case -1: /* OpenSSL internal error */
                    throw Interop.Crypto.CreateOpenSslCryptographicException();
                default:
                    Debug.Assert(result == 0, $"LookupFriendlyNameByOid returned unexpected result {result}");

                    // The lookup may have left errors in this case, clean up for precaution.
                    Interop.Crypto.ErrClearError();
                    return null;
            }
        }

        private static string? NativeFriendlyNameToOid(string friendlyName, OidGroup oidGroup, bool fallBackToAllGroups)
        {
            IntPtr sharedObject = Interop.Crypto.GetObjectDefinitionByName(friendlyName);

            if (sharedObject == IntPtr.Zero)
            {
                return null;
            }

            return Interop.Crypto.GetOidValue(sharedObject);
        }
    }
}
-1
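The record above stores OidLookup.OpenSsl.cs identically on both sides of the PR (label -1). As a hedged aside on what this file backs, the sketch below round-trips an OID through the public Oid type, which routes through the platform lookup layer on OpenSSL-based builds; the specific OID (SHA-256, 2.16.840.1.101.3.4.2.1) is chosen for illustration and is not taken from the record.

```c#
using System;
using System.Security.Cryptography;

class OidDemo
{
    static void Main()
    {
        // Dotted OID -> friendly name (resolved through the platform lookup layer).
        var sha256 = new Oid("2.16.840.1.101.3.4.2.1");
        Console.WriteLine(sha256.FriendlyName);   // typically "sha256"

        // Friendly name -> dotted OID within a given group.
        Oid byName = Oid.FromFriendlyName("sha256", OidGroup.HashAlgorithm);
        Console.WriteLine(byName.Value);          // "2.16.840.1.101.3.4.2.1"
    }
}
```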
dotnet/runtime
66,479
Add metrics for caching
Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
maryamariyan
2022-03-10T21:30:00Z
2022-04-08T18:16:17Z
51afa067be9f0d00dac2c455ceb78e38eef1e239
cef4ae17d91c278880f45f017e4272afe1f379b3
Add metrics for caching. Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
./src/tests/Loader/classloader/TypeGeneratorTests/TypeGeneratorTest899/Generated899.ilproj
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <ItemGroup> <Compile Include="Generated899.il" /> </ItemGroup> <ItemGroup> <ProjectReference Include="..\TestFramework\TestFramework.csproj" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <ItemGroup> <Compile Include="Generated899.il" /> </ItemGroup> <ItemGroup> <ProjectReference Include="..\TestFramework\TestFramework.csproj" /> </ItemGroup> </Project>
-1
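The description repeated in these records sketches, as an unchosen alternative, aggregating cache statistics from per-thread state. Below is a minimal illustration of that general idea under assumed names (a ThreadLocal-backed StatsAggregator); it is not the code from the PR or from any linked commit.

```c#
using System;
using System.Threading;

// Illustrative only: per-thread counters aggregated on demand, so hot-path
// increments avoid contended updates to a single shared field.
sealed class StatsAggregator
{
    private sealed class Counters { public long Hits; public long Misses; }

    // trackAllValues: true lets Snapshot() enumerate every thread's counters.
    private readonly ThreadLocal<Counters> _perThread =
        new ThreadLocal<Counters>(() => new Counters(), trackAllValues: true);

    public void RecordHit() => _perThread.Value.Hits++;
    public void RecordMiss() => _perThread.Value.Misses++;

    public (long Hits, long Misses) Snapshot()
    {
        long hits = 0, misses = 0;
        foreach (Counters c in _perThread.Values)
        {
            // Reads are not synchronized with writers, so the snapshot is
            // approximate under load -- the same trade-off the PR accepts.
            hits += Interlocked.Read(ref c.Hits);
            misses += Interlocked.Read(ref c.Misses);
        }
        return (hits, misses);
    }
}
```

A note on the trade-off: counters recorded by exited threads remain reachable through the ThreadLocal until it is disposed, which mirrors the stale-thread bookkeeping concern the description raises about this approach.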
dotnet/runtime
66,479
Add metrics for caching
Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
maryamariyan
2022-03-10T21:30:00Z
2022-04-08T18:16:17Z
51afa067be9f0d00dac2c455ceb78e38eef1e239
cef4ae17d91c278880f45f017e4272afe1f379b3
Add metrics for caching. Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
./src/tests/JIT/HardwareIntrinsics/General/Vector64_1/op_BitwiseOr.Int32.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; namespace JIT.HardwareIntrinsics.General { public static partial class Program { private static void op_BitwiseOrInt32() { var test = new VectorBinaryOpTest__op_BitwiseOrInt32(); // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); // Validates passing the field of a local class works test.RunClassLclFldScenario(); // Validates passing an instance member of a class works test.RunClassFldScenario(); // Validates passing the field of a local struct works test.RunStructLclFldScenario(); // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class VectorBinaryOpTest__op_BitwiseOrInt32 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(Int32[] inArray1, Int32[] inArray2, Int32[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int32>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int32>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int32>(); if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int32, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int32, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return 
(void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector64<Int32> _fld1; public Vector64<Int32> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref testStruct._fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref testStruct._fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); return testStruct; } public void RunStructFldScenario(VectorBinaryOpTest__op_BitwiseOrInt32 testClass) { var result = _fld1 | _fld2; Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } private static readonly int LargestVectorSize = 8; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<Int32>>() / sizeof(Int32); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector64<Int32>>() / sizeof(Int32); private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<Int32>>() / sizeof(Int32); private static Int32[] _data1 = new Int32[Op1ElementCount]; private static Int32[] _data2 = new Int32[Op2ElementCount]; private static Vector64<Int32> _clsVar1; private static Vector64<Int32> _clsVar2; private Vector64<Int32> _fld1; private Vector64<Int32> _fld2; private DataTable _dataTable; static VectorBinaryOpTest__op_BitwiseOrInt32() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _clsVar1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _clsVar2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); } public VectorBinaryOpTest__op_BitwiseOrInt32() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } _dataTable = new DataTable(_data1, _data2, new Int32[RetElementCount], LargestVectorSize); } public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = Unsafe.Read<Vector64<Int32>>(_dataTable.inArray1Ptr) | Unsafe.Read<Vector64<Int32>>(_dataTable.inArray2Ptr); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(Vector64<Int32>).GetMethod("op_BitwiseOr", new Type[] { typeof(Vector64<Int32>), typeof(Vector64<Int32>) }) .Invoke(null, new object[] { Unsafe.Read<Vector64<Int32>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector64<Int32>>(_dataTable.inArray2Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Int32>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = _clsVar1 | _clsVar2; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector64<Int32>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector64<Int32>>(_dataTable.inArray2Ptr); var result = op1 | op2; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new VectorBinaryOpTest__op_BitwiseOrInt32(); var result = test._fld1 | test._fld2; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = _fld1 | _fld2; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = test._fld1 | test._fld2; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } private void ValidateResult(Vector64<Int32> op1, Vector64<Int32> op2, void* result, [CallerMemberName] string method = "") { Int32[] inArray1 = new Int32[Op1ElementCount]; Int32[] inArray2 = new Int32[Op2ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), op2); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int32>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "") { Int32[] inArray1 = new Int32[Op1ElementCount]; Int32[] inArray2 = new Int32[Op2ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector64<Int32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector64<Int32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int32>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(Int32[] left, Int32[] right, Int32[] 
result, [CallerMemberName] string method = "") { bool succeeded = true; if (result[0] != (int)(left[0] | right[0])) { succeeded = false; } else { for (var i = 1; i < RetElementCount; i++) { if (result[i] != (int)(left[i] | right[i])) { succeeded = false; break; } } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Vector64)}.op_BitwiseOr<Int32>(Vector64<Int32>, Vector64<Int32>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; namespace JIT.HardwareIntrinsics.General { public static partial class Program { private static void op_BitwiseOrInt32() { var test = new VectorBinaryOpTest__op_BitwiseOrInt32(); // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); // Validates passing the field of a local class works test.RunClassLclFldScenario(); // Validates passing an instance member of a class works test.RunClassFldScenario(); // Validates passing the field of a local struct works test.RunStructLclFldScenario(); // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class VectorBinaryOpTest__op_BitwiseOrInt32 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(Int32[] inArray1, Int32[] inArray2, Int32[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int32>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int32>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int32>(); if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int32, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int32, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return 
(void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector64<Int32> _fld1; public Vector64<Int32> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref testStruct._fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref testStruct._fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); return testStruct; } public void RunStructFldScenario(VectorBinaryOpTest__op_BitwiseOrInt32 testClass) { var result = _fld1 | _fld2; Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } private static readonly int LargestVectorSize = 8; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<Int32>>() / sizeof(Int32); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector64<Int32>>() / sizeof(Int32); private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<Int32>>() / sizeof(Int32); private static Int32[] _data1 = new Int32[Op1ElementCount]; private static Int32[] _data2 = new Int32[Op2ElementCount]; private static Vector64<Int32> _clsVar1; private static Vector64<Int32> _clsVar2; private Vector64<Int32> _fld1; private Vector64<Int32> _fld2; private DataTable _dataTable; static VectorBinaryOpTest__op_BitwiseOrInt32() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _clsVar1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _clsVar2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); } public VectorBinaryOpTest__op_BitwiseOrInt32() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } _dataTable = new DataTable(_data1, _data2, new Int32[RetElementCount], LargestVectorSize); } public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = Unsafe.Read<Vector64<Int32>>(_dataTable.inArray1Ptr) | Unsafe.Read<Vector64<Int32>>(_dataTable.inArray2Ptr); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(Vector64<Int32>).GetMethod("op_BitwiseOr", new Type[] { typeof(Vector64<Int32>), typeof(Vector64<Int32>) }) .Invoke(null, new object[] { Unsafe.Read<Vector64<Int32>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector64<Int32>>(_dataTable.inArray2Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Int32>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = _clsVar1 | _clsVar2; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector64<Int32>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector64<Int32>>(_dataTable.inArray2Ptr); var result = op1 | op2; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new VectorBinaryOpTest__op_BitwiseOrInt32(); var result = test._fld1 | test._fld2; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = _fld1 | _fld2; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = test._fld1 | test._fld2; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } private void ValidateResult(Vector64<Int32> op1, Vector64<Int32> op2, void* result, [CallerMemberName] string method = "") { Int32[] inArray1 = new Int32[Op1ElementCount]; Int32[] inArray2 = new Int32[Op2ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), op2); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int32>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "") { Int32[] inArray1 = new Int32[Op1ElementCount]; Int32[] inArray2 = new Int32[Op2ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector64<Int32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector64<Int32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int32>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(Int32[] left, Int32[] right, Int32[] 
result, [CallerMemberName] string method = "") { bool succeeded = true; if (result[0] != (int)(left[0] | right[0])) { succeeded = false; } else { for (var i = 1; i < RetElementCount; i++) { if (result[i] != (int)(left[i] | right[i])) { succeeded = false; break; } } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Vector64)}.op_BitwiseOr<Int32>(Vector64<Int32>, Vector64<Int32>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
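The generated test stored in the record above validates Vector64&lt;Int32&gt;'s op_BitwiseOr across several scenarios. For orientation, here is a much smaller sketch of the same operator; the element values are arbitrary and the class name is invented for the example.

```c#
using System;
using System.Runtime.Intrinsics;

class Vector64OrDemo
{
    static void Main()
    {
        Vector64<int> left = Vector64.Create(0b0101, 0b0011);
        Vector64<int> right = Vector64.Create(0b1010, 0b0110);

        // op_BitwiseOr is applied lane by lane, which is what the generated test validates.
        Vector64<int> result = left | right;

        for (int i = 0; i < Vector64<int>.Count; i++)
        {
            Console.WriteLine(result.GetElement(i));   // 15, then 7
        }
    }
}
```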
dotnet/runtime
66,479
Add metrics for caching
Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
maryamariyan
2022-03-10T21:30:00Z
2022-04-08T18:16:17Z
51afa067be9f0d00dac2c455ceb78e38eef1e239
cef4ae17d91c278880f45f017e4272afe1f379b3
Add metrics for caching. Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
./src/tests/Loader/classloader/generics/GenericMethods/method001.cs
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System;

class Foo
{
    public T Function<T>(T i)
    {
        return i;
    }
}

public class Test_method001
{
    public static int counter = 0;
    public static bool result = true;

    public static void Eval(bool exp)
    {
        counter++;
        if (!exp)
        {
            result = exp;
            Console.WriteLine("Test Failed at location: " + counter);
        }
    }

    public static int Main()
    {
        Foo f = new Foo();

        Eval(f.Function<int>(1).Equals(1));
        Eval(f.Function<string>("string").Equals("string"));

        if (result)
        {
            Console.WriteLine("Test Passed");
            return 100;
        }
        else
        {
            Console.WriteLine("Test Failed");
            return 1;
        }
    }
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System;

class Foo
{
    public T Function<T>(T i)
    {
        return i;
    }
}

public class Test_method001
{
    public static int counter = 0;
    public static bool result = true;

    public static void Eval(bool exp)
    {
        counter++;
        if (!exp)
        {
            result = exp;
            Console.WriteLine("Test Failed at location: " + counter);
        }
    }

    public static int Main()
    {
        Foo f = new Foo();

        Eval(f.Function<int>(1).Equals(1));
        Eval(f.Function<string>("string").Equals("string"));

        if (result)
        {
            Console.WriteLine("Test Passed");
            return 100;
        }
        else
        {
            Console.WriteLine("Test Failed");
            return 1;
        }
    }
}
-1
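The loader test above calls a generic identity method with explicit type arguments. Purely as an illustrative aside (the type name below is invented and is not part of the test), the same call shape also works with the type argument inferred from the argument:

```c#
using System;

class IdentityHolder
{
    public T Function<T>(T i) => i;
}

class InferenceDemo
{
    static void Main()
    {
        var holder = new IdentityHolder();
        // Type arguments inferred at the call site: T=int, then T=string.
        Console.WriteLine(holder.Function(1));
        Console.WriteLine(holder.Function("string"));
    }
}
```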
dotnet/runtime
66,479
Add metrics for caching
Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
maryamariyan
2022-03-10T21:30:00Z
2022-04-08T18:16:17Z
51afa067be9f0d00dac2c455ceb78e38eef1e239
cef4ae17d91c278880f45f017e4272afe1f379b3
Add metrics for caching. Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
./src/coreclr/nativeaot/System.Private.CoreLib/src/System/Runtime/CompilerServices/ClassConstructorRunner.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Threading; using System.Diagnostics; using System.Collections.Generic; using System.Runtime.InteropServices; using Internal.Runtime; using Internal.Runtime.CompilerHelpers; namespace System.Runtime.CompilerServices { internal static partial class ClassConstructorRunner { //============================================================================================================== // Ensures the class constructor for the given type has run. // // Called by the runtime when it finds a class whose static class constructor has probably not run // (probably because it checks in the initialized flag without thread synchronization). // // The context structure passed by reference lives in the image of one of the application's modules. // The contents are thus fixed (do not require pinning) and the address can be used as a unique // identifier for the context. // // This guarantee is violated in one specific case: where a class constructor cycle would cause a deadlock. If // so, per ECMA specs, this method returns without guaranteeing that the .cctor has run. // // No attempt is made to detect or break deadlocks due to other synchronization mechanisms. //============================================================================================================== private static unsafe object CheckStaticClassConstructionReturnGCStaticBase(StaticClassConstructionContext* context, object gcStaticBase) { EnsureClassConstructorRun(context); return gcStaticBase; } private static unsafe IntPtr CheckStaticClassConstructionReturnNonGCStaticBase(StaticClassConstructionContext* context, IntPtr nonGcStaticBase) { EnsureClassConstructorRun(context); return nonGcStaticBase; } private static unsafe object CheckStaticClassConstructionReturnThreadStaticBase(TypeManagerSlot* pModuleData, int typeTlsIndex, StaticClassConstructionContext* context) { object threadStaticBase = ThreadStatics.GetThreadStaticBaseForType(pModuleData, typeTlsIndex); EnsureClassConstructorRun(context); return threadStaticBase; } public static unsafe void EnsureClassConstructorRun(StaticClassConstructionContext* pContext) { IntPtr pfnCctor = pContext->cctorMethodAddress; NoisyLog("EnsureClassConstructorRun, cctor={0}, thread={1}", pfnCctor, CurrentManagedThreadId); // If we were called from MRT, this check is redundant but harmless. This is in case someone within classlib // (cough, Reflection) needs to call this explicitly. if (pContext->initialized == 1) { NoisyLog("Cctor already run, cctor={0}, thread={1}", pfnCctor, CurrentManagedThreadId); return; } CctorHandle cctor = Cctor.GetCctor(pContext); Cctor[] cctors = cctor.Array; int cctorIndex = cctor.Index; try { Lock cctorLock = cctors[cctorIndex].Lock; if (DeadlockAwareAcquire(cctor, pfnCctor)) { int currentManagedThreadId = CurrentManagedThreadId; try { NoisyLog("Acquired cctor lock, cctor={0}, thread={1}", pfnCctor, currentManagedThreadId); cctors[cctorIndex].HoldingThread = currentManagedThreadId; if (pContext->initialized == 0) // Check again in case some thread raced us while we were acquiring the lock. 
{ TypeInitializationException priorException = cctors[cctorIndex].Exception; if (priorException != null) throw priorException; try { NoisyLog("Calling cctor, cctor={0}, thread={1}", pfnCctor, currentManagedThreadId); ((delegate*<void>)pfnCctor)(); // Insert a memory barrier here to order any writes executed as part of static class // construction above with respect to the initialized flag update we're about to make // below. This is important since the fast path for checking the cctor uses a normal read // and doesn't come here so without the barrier it could observe initialized == 1 but // still see uninitialized static fields on the class. Interlocked.MemoryBarrier(); NoisyLog("Set type inited, cctor={0}, thread={1}", pfnCctor, currentManagedThreadId); pContext->initialized = 1; } catch (Exception e) { TypeInitializationException wrappedException = new TypeInitializationException(null, SR.TypeInitialization_Type_NoTypeAvailable, e); cctors[cctorIndex].Exception = wrappedException; throw wrappedException; } } } finally { cctors[cctorIndex].HoldingThread = ManagedThreadIdNone; NoisyLog("Releasing cctor lock, cctor={0}, thread={1}", pfnCctor, currentManagedThreadId); cctorLock.Release(); } } else { // Cctor cycle resulted in a deadlock. We will break the guarantee and return without running the // .cctor. } } finally { Cctor.Release(cctor); } NoisyLog("EnsureClassConstructorRun complete, cctor={0}, thread={1}", pfnCctor, CurrentManagedThreadId); } //========================================================================================================= // Return value: // true - lock acquired. // false - deadlock detected. Lock not acquired. //========================================================================================================= private static bool DeadlockAwareAcquire(CctorHandle cctor, IntPtr pfnCctor) { const int WaitIntervalSeedInMS = 1; // seed with 1ms and double every time through the loop const int WaitIntervalLimitInMS = WaitIntervalSeedInMS << 7; // limit of 128ms int waitIntervalInMS = WaitIntervalSeedInMS; int cctorIndex = cctor.Index; Cctor[] cctors = cctor.Array; Lock lck = cctors[cctorIndex].Lock; if (lck.IsAcquired) return false; // Thread recursively triggered the same cctor. if (lck.TryAcquire(waitIntervalInMS)) return true; // We couldn't acquire the lock. See if this .cctor is involved in a cross-thread deadlock. If so, break // the deadlock by breaking the guarantee - we'll skip running the .cctor and let the caller take his chances. int currentManagedThreadId = CurrentManagedThreadId; int unmarkCookie = -1; try { // We'll spin in a forever-loop of checking for a deadlock state, then waiting a short time, then // checking for a deadlock state again, and so on. This is because the BlockedRecord info has a built-in // lag time - threads don't report themselves as blocking until they've been blocked for a non-trivial // amount of time. // // If the threads are deadlocked for any reason other a class constructor cycling, this loop will never // terminate - this is by design. If the user code inside the class constructors were to // deadlock themselves, then that's a bug in user code. for (;;) { using (LockHolder.Hold(s_cctorGlobalLock)) { // Ask the guy who holds the cctor lock we're trying to acquire who he's waiting for. Keep // walking down that chain until we either discover a cycle or reach a non-blocking state. Note // that reaching a non-blocking state is not proof that we've avoided a deadlock due to the // BlockingRecord reporting lag. 
CctorHandle cctorWalk = cctor; int chainStepCount = 0; for (; chainStepCount < Cctor.Count; chainStepCount++) { int cctorWalkIndex = cctorWalk.Index; Cctor[] cctorWalkArray = cctorWalk.Array; int holdingThread = cctorWalkArray[cctorWalkIndex].HoldingThread; if (holdingThread == currentManagedThreadId) { // Deadlock detected. We will break the guarantee and return without running the .cctor. DebugLog("A class constructor was skipped due to class constructor cycle. cctor={0}, thread={1}", pfnCctor, currentManagedThreadId); // We are maintaining an invariant that the BlockingRecords never show a cycle because, // before we add a record, we first check for a cycle. As a result, once we've said // we're waiting, we are committed to waiting and will not need to skip running this // .cctor. Debug.Assert(unmarkCookie == -1); return false; } if (holdingThread == ManagedThreadIdNone) { // No one appears to be holding this cctor lock. Give the current thread some more time // to acquire the lock. break; } cctorWalk = BlockingRecord.GetCctorThatThreadIsBlockedOn(holdingThread); if (cctorWalk.Array == null) { // The final thread in the chain appears to be blocked on nothing. Give the current // thread some more time to acquire the lock. break; } } // We don't allow cycles in the BlockingRecords, so we must always enumerate at most each entry, // but never more. Debug.Assert(chainStepCount < Cctor.Count); // We have not discovered a deadlock, so let's register the fact that we're waiting on another // thread and continue to wait. It is important that we only signal that we are blocked after // we check for a deadlock because, otherwise, we give all threads involved in the deadlock the // opportunity to break it themselves and that leads to "ping-ponging" between the cctors // involved in the cycle, allowing intermediate cctor results to be observed. // // The invariant here is that we never 'publish' a BlockingRecord that forms a cycle. So it is // important that the look-for-cycle-and-then-publish-wait-status operation be atomic with // respect to other updates to the BlockingRecords. if (unmarkCookie == -1) { NoisyLog("Mark thread blocked, cctor={0}, thread={1}", pfnCctor, currentManagedThreadId); unmarkCookie = BlockingRecord.MarkThreadAsBlocked(currentManagedThreadId, cctor); } } // _cctorGlobalLock scope if (waitIntervalInMS < WaitIntervalLimitInMS) waitIntervalInMS *= 2; // We didn't find a cycle yet, try to take the lock again. if (lck.TryAcquire(waitIntervalInMS)) return true; } // infinite loop } finally { if (unmarkCookie != -1) { NoisyLog("Unmark thread blocked, cctor={0}, thread={1}", pfnCctor, currentManagedThreadId); BlockingRecord.UnmarkThreadAsBlocked(unmarkCookie); } } } //============================================================================================================== // These structs are allocated on demand whenever the runtime tries to run a class constructor. Once the // the class constructor has been successfully initialized, we reclaim this structure. The structure is long- // lived only if the class constructor threw an exception. 
//============================================================================================================== private unsafe struct Cctor { public Lock Lock; public TypeInitializationException Exception; public int HoldingThread; private int _refCount; private StaticClassConstructionContext* _pContext; //========================================================================================================== // Gets the Cctor entry associated with a specific class constructor context (creating it if necessary.) //========================================================================================================== public static CctorHandle GetCctor(StaticClassConstructionContext* pContext) { #if DEBUG const int Grow = 2; #else const int Grow = 10; #endif // WASMTODO: Remove this when the Initialize method gets called by the runtime startup #if TARGET_WASM if (s_cctorGlobalLock == null) { Interlocked.CompareExchange(ref s_cctorGlobalLock, new Lock(), null); } if (s_cctorArrays == null) { Interlocked.CompareExchange(ref s_cctorArrays, new Cctor[10][], null); } #endif // TARGET_WASM using (LockHolder.Hold(s_cctorGlobalLock)) { Cctor[]? resultArray = null; int resultIndex = -1; if (s_count != 0) { // Search for the cctor context in our existing arrays for (int cctorIndex = 0; cctorIndex < s_cctorArraysCount; ++cctorIndex) { Cctor[] segment = s_cctorArrays[cctorIndex]; for (int i = 0; i < segment.Length; i++) { if (segment[i]._pContext == pContext) { resultArray = segment; resultIndex = i; break; } } if (resultArray != null) break; } } if (resultArray == null) { // look for an empty entry in an existing array for (int cctorIndex = 0; cctorIndex < s_cctorArraysCount; ++cctorIndex) { Cctor[] segment = s_cctorArrays[cctorIndex]; for (int i = 0; i < segment.Length; i++) { if (segment[i]._pContext == default(StaticClassConstructionContext*)) { resultArray = segment; resultIndex = i; break; } } if (resultArray != null) break; } if (resultArray == null) { // allocate a new array resultArray = new Cctor[Grow]; if (s_cctorArraysCount == s_cctorArrays.Length) { // grow the container Array.Resize(ref s_cctorArrays, (s_cctorArrays.Length * 2) + 1); } // store the array in the container, this cctor gets index 0 s_cctorArrays[s_cctorArraysCount] = resultArray; s_cctorArraysCount++; resultIndex = 0; } Debug.Assert(resultArray[resultIndex]._pContext == default(StaticClassConstructionContext*)); resultArray[resultIndex]._pContext = pContext; resultArray[resultIndex].Lock = new Lock(); s_count++; } Interlocked.Increment(ref resultArray[resultIndex]._refCount); return new CctorHandle(resultArray, resultIndex); } } public static int Count { get { Debug.Assert(s_cctorGlobalLock.IsAcquired); return s_count; } } public static void Release(CctorHandle cctor) { using (LockHolder.Hold(s_cctorGlobalLock)) { Cctor[] cctors = cctor.Array; int cctorIndex = cctor.Index; if (0 == Interlocked.Decrement(ref cctors[cctorIndex]._refCount)) { if (cctors[cctorIndex].Exception == null) { cctors[cctorIndex] = default; s_count--; } } } } } private struct CctorHandle { public CctorHandle(Cctor[] array, int index) { _array = array; _index = index; } public Cctor[] Array { get { return _array; } } public int Index { get { return _index; } } private Cctor[] _array; private int _index; } //============================================================================================================== // Keeps track of threads that are blocked on a cctor lock (alas, we don't have ThreadLocals here in // System.Private.CoreLib so we 
have to use a side table.) // // This is used for cross-thread deadlock detection. // // - Data is only entered here if a thread has been blocked past a certain timeout (otherwise, it's certainly // not participating of a deadlock.) // - Reads and writes to _blockingRecord are guarded by _cctorGlobalLock. // - BlockingRecords for individual threads are created on demand. Since this is a rare event, we won't attempt // to recycle them directly (however, // ManagedThreadId's are themselves recycled pretty quickly - and threads that inherit the managed id also // inherit the BlockingRecord.) //============================================================================================================== private struct BlockingRecord { public int ManagedThreadId; // ManagedThreadId of the blocked thread public CctorHandle BlockedOn; public static int MarkThreadAsBlocked(int managedThreadId, CctorHandle blockedOn) { #if DEBUG const int Grow = 2; #else const int Grow = 10; #endif using (LockHolder.Hold(s_cctorGlobalLock)) { if (s_blockingRecords == null) s_blockingRecords = new BlockingRecord[Grow]; int found; for (found = 0; found < s_nextBlockingRecordIndex; found++) { if (s_blockingRecords[found].ManagedThreadId == managedThreadId) break; } if (found == s_nextBlockingRecordIndex) { if (s_nextBlockingRecordIndex == s_blockingRecords.Length) { BlockingRecord[] newBlockingRecords = new BlockingRecord[s_blockingRecords.Length + Grow]; for (int i = 0; i < s_blockingRecords.Length; i++) { newBlockingRecords[i] = s_blockingRecords[i]; } s_blockingRecords = newBlockingRecords; } s_blockingRecords[s_nextBlockingRecordIndex].ManagedThreadId = managedThreadId; s_nextBlockingRecordIndex++; } s_blockingRecords[found].BlockedOn = blockedOn; return found; } } public static void UnmarkThreadAsBlocked(int blockRecordIndex) { // This method must never throw s_cctorGlobalLock.Acquire(); s_blockingRecords[blockRecordIndex].BlockedOn = new CctorHandle(null, 0); s_cctorGlobalLock.Release(); } public static CctorHandle GetCctorThatThreadIsBlockedOn(int managedThreadId) { Debug.Assert(s_cctorGlobalLock.IsAcquired); for (int i = 0; i < s_nextBlockingRecordIndex; i++) { if (s_blockingRecords[i].ManagedThreadId == managedThreadId) return s_blockingRecords[i].BlockedOn; } return new CctorHandle(null, 0); } private static BlockingRecord[] s_blockingRecords; private static int s_nextBlockingRecordIndex; } private static int CurrentManagedThreadId => ManagedThreadId.Current; private const int ManagedThreadIdNone = ManagedThreadId.IdNone; private static Lock s_cctorGlobalLock; // These three statics are used by ClassConstructorRunner.Cctor but moved out to avoid an unnecessary // extra class constructor call. // // Because Cctor's are mutable structs, we have to give our callers raw references to the underlying arrays // for this collection to be usable. This also means once we place a Cctor in an array, we can't grow or // reallocate the array. private static Cctor[][] s_cctorArrays; private static int s_cctorArraysCount; private static int s_count; // Eager construction called from LibraryInitialize Cctor.GetCctor uses _cctorGlobalLock. internal static void Initialize() { s_cctorArrays = new Cctor[10][]; s_cctorGlobalLock = new Lock(); } [Conditional("ENABLE_NOISY_CCTOR_LOG")] private static void NoisyLog(string format, IntPtr cctorMethod, int threadId) { // We cannot utilize any of the typical number formatting code because it triggers globalization code to run // and this cctor code is layered below globalization. 
#if DEBUG Debug.WriteLine(format, ToHexString(cctorMethod), ToHexString(threadId)); #endif // DEBUG } [Conditional("DEBUG")] private static void DebugLog(string format, IntPtr cctorMethod, int threadId) { // We cannot utilize any of the typical number formatting code because it triggers globalization code to run // and this cctor code is layered below globalization. #if DEBUG Debug.WriteLine(format, ToHexString(cctorMethod), ToHexString(threadId)); #endif } // We cannot utilize any of the typical number formatting code because it triggers globalization code to run // and this cctor code is layered below globalization. #if DEBUG private static string ToHexString(int num) { return ToHexStringUnsignedLong((ulong)num, false, 8); } private static string ToHexString(IntPtr num) { return ToHexStringUnsignedLong((ulong)num, false, 16); } private static char GetHexChar(uint u) { if (u < 10) return unchecked((char)('0' + u)); return unchecked((char)('a' + (u - 10))); } public static unsafe string ToHexStringUnsignedLong(ulong u, bool zeroPrepad, int numChars) { char[] chars = new char[numChars]; int i = numChars - 1; for (; i >= 0; i--) { chars[i] = GetHexChar((uint)(u % 16)); u = u / 16; if ((i == 0) || (!zeroPrepad && (u == 0))) break; } string str; fixed (char* p = &chars[i]) { str = new string(p, 0, numChars - i); } return str; } #endif } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Threading; using System.Diagnostics; using System.Collections.Generic; using System.Runtime.InteropServices; using Internal.Runtime; using Internal.Runtime.CompilerHelpers; namespace System.Runtime.CompilerServices { internal static partial class ClassConstructorRunner { //============================================================================================================== // Ensures the class constructor for the given type has run. // // Called by the runtime when it finds a class whose static class constructor has probably not run // (probably because it checks in the initialized flag without thread synchronization). // // The context structure passed by reference lives in the image of one of the application's modules. // The contents are thus fixed (do not require pinning) and the address can be used as a unique // identifier for the context. // // This guarantee is violated in one specific case: where a class constructor cycle would cause a deadlock. If // so, per ECMA specs, this method returns without guaranteeing that the .cctor has run. // // No attempt is made to detect or break deadlocks due to other synchronization mechanisms. //============================================================================================================== private static unsafe object CheckStaticClassConstructionReturnGCStaticBase(StaticClassConstructionContext* context, object gcStaticBase) { EnsureClassConstructorRun(context); return gcStaticBase; } private static unsafe IntPtr CheckStaticClassConstructionReturnNonGCStaticBase(StaticClassConstructionContext* context, IntPtr nonGcStaticBase) { EnsureClassConstructorRun(context); return nonGcStaticBase; } private static unsafe object CheckStaticClassConstructionReturnThreadStaticBase(TypeManagerSlot* pModuleData, int typeTlsIndex, StaticClassConstructionContext* context) { object threadStaticBase = ThreadStatics.GetThreadStaticBaseForType(pModuleData, typeTlsIndex); EnsureClassConstructorRun(context); return threadStaticBase; } public static unsafe void EnsureClassConstructorRun(StaticClassConstructionContext* pContext) { IntPtr pfnCctor = pContext->cctorMethodAddress; NoisyLog("EnsureClassConstructorRun, cctor={0}, thread={1}", pfnCctor, CurrentManagedThreadId); // If we were called from MRT, this check is redundant but harmless. This is in case someone within classlib // (cough, Reflection) needs to call this explicitly. if (pContext->initialized == 1) { NoisyLog("Cctor already run, cctor={0}, thread={1}", pfnCctor, CurrentManagedThreadId); return; } CctorHandle cctor = Cctor.GetCctor(pContext); Cctor[] cctors = cctor.Array; int cctorIndex = cctor.Index; try { Lock cctorLock = cctors[cctorIndex].Lock; if (DeadlockAwareAcquire(cctor, pfnCctor)) { int currentManagedThreadId = CurrentManagedThreadId; try { NoisyLog("Acquired cctor lock, cctor={0}, thread={1}", pfnCctor, currentManagedThreadId); cctors[cctorIndex].HoldingThread = currentManagedThreadId; if (pContext->initialized == 0) // Check again in case some thread raced us while we were acquiring the lock. 
{ TypeInitializationException priorException = cctors[cctorIndex].Exception; if (priorException != null) throw priorException; try { NoisyLog("Calling cctor, cctor={0}, thread={1}", pfnCctor, currentManagedThreadId); ((delegate*<void>)pfnCctor)(); // Insert a memory barrier here to order any writes executed as part of static class // construction above with respect to the initialized flag update we're about to make // below. This is important since the fast path for checking the cctor uses a normal read // and doesn't come here so without the barrier it could observe initialized == 1 but // still see uninitialized static fields on the class. Interlocked.MemoryBarrier(); NoisyLog("Set type inited, cctor={0}, thread={1}", pfnCctor, currentManagedThreadId); pContext->initialized = 1; } catch (Exception e) { TypeInitializationException wrappedException = new TypeInitializationException(null, SR.TypeInitialization_Type_NoTypeAvailable, e); cctors[cctorIndex].Exception = wrappedException; throw wrappedException; } } } finally { cctors[cctorIndex].HoldingThread = ManagedThreadIdNone; NoisyLog("Releasing cctor lock, cctor={0}, thread={1}", pfnCctor, currentManagedThreadId); cctorLock.Release(); } } else { // Cctor cycle resulted in a deadlock. We will break the guarantee and return without running the // .cctor. } } finally { Cctor.Release(cctor); } NoisyLog("EnsureClassConstructorRun complete, cctor={0}, thread={1}", pfnCctor, CurrentManagedThreadId); } //========================================================================================================= // Return value: // true - lock acquired. // false - deadlock detected. Lock not acquired. //========================================================================================================= private static bool DeadlockAwareAcquire(CctorHandle cctor, IntPtr pfnCctor) { const int WaitIntervalSeedInMS = 1; // seed with 1ms and double every time through the loop const int WaitIntervalLimitInMS = WaitIntervalSeedInMS << 7; // limit of 128ms int waitIntervalInMS = WaitIntervalSeedInMS; int cctorIndex = cctor.Index; Cctor[] cctors = cctor.Array; Lock lck = cctors[cctorIndex].Lock; if (lck.IsAcquired) return false; // Thread recursively triggered the same cctor. if (lck.TryAcquire(waitIntervalInMS)) return true; // We couldn't acquire the lock. See if this .cctor is involved in a cross-thread deadlock. If so, break // the deadlock by breaking the guarantee - we'll skip running the .cctor and let the caller take his chances. int currentManagedThreadId = CurrentManagedThreadId; int unmarkCookie = -1; try { // We'll spin in a forever-loop of checking for a deadlock state, then waiting a short time, then // checking for a deadlock state again, and so on. This is because the BlockedRecord info has a built-in // lag time - threads don't report themselves as blocking until they've been blocked for a non-trivial // amount of time. // // If the threads are deadlocked for any reason other a class constructor cycling, this loop will never // terminate - this is by design. If the user code inside the class constructors were to // deadlock themselves, then that's a bug in user code. for (;;) { using (LockHolder.Hold(s_cctorGlobalLock)) { // Ask the guy who holds the cctor lock we're trying to acquire who he's waiting for. Keep // walking down that chain until we either discover a cycle or reach a non-blocking state. Note // that reaching a non-blocking state is not proof that we've avoided a deadlock due to the // BlockingRecord reporting lag. 
CctorHandle cctorWalk = cctor; int chainStepCount = 0; for (; chainStepCount < Cctor.Count; chainStepCount++) { int cctorWalkIndex = cctorWalk.Index; Cctor[] cctorWalkArray = cctorWalk.Array; int holdingThread = cctorWalkArray[cctorWalkIndex].HoldingThread; if (holdingThread == currentManagedThreadId) { // Deadlock detected. We will break the guarantee and return without running the .cctor. DebugLog("A class constructor was skipped due to class constructor cycle. cctor={0}, thread={1}", pfnCctor, currentManagedThreadId); // We are maintaining an invariant that the BlockingRecords never show a cycle because, // before we add a record, we first check for a cycle. As a result, once we've said // we're waiting, we are committed to waiting and will not need to skip running this // .cctor. Debug.Assert(unmarkCookie == -1); return false; } if (holdingThread == ManagedThreadIdNone) { // No one appears to be holding this cctor lock. Give the current thread some more time // to acquire the lock. break; } cctorWalk = BlockingRecord.GetCctorThatThreadIsBlockedOn(holdingThread); if (cctorWalk.Array == null) { // The final thread in the chain appears to be blocked on nothing. Give the current // thread some more time to acquire the lock. break; } } // We don't allow cycles in the BlockingRecords, so we must always enumerate at most each entry, // but never more. Debug.Assert(chainStepCount < Cctor.Count); // We have not discovered a deadlock, so let's register the fact that we're waiting on another // thread and continue to wait. It is important that we only signal that we are blocked after // we check for a deadlock because, otherwise, we give all threads involved in the deadlock the // opportunity to break it themselves and that leads to "ping-ponging" between the cctors // involved in the cycle, allowing intermediate cctor results to be observed. // // The invariant here is that we never 'publish' a BlockingRecord that forms a cycle. So it is // important that the look-for-cycle-and-then-publish-wait-status operation be atomic with // respect to other updates to the BlockingRecords. if (unmarkCookie == -1) { NoisyLog("Mark thread blocked, cctor={0}, thread={1}", pfnCctor, currentManagedThreadId); unmarkCookie = BlockingRecord.MarkThreadAsBlocked(currentManagedThreadId, cctor); } } // _cctorGlobalLock scope if (waitIntervalInMS < WaitIntervalLimitInMS) waitIntervalInMS *= 2; // We didn't find a cycle yet, try to take the lock again. if (lck.TryAcquire(waitIntervalInMS)) return true; } // infinite loop } finally { if (unmarkCookie != -1) { NoisyLog("Unmark thread blocked, cctor={0}, thread={1}", pfnCctor, currentManagedThreadId); BlockingRecord.UnmarkThreadAsBlocked(unmarkCookie); } } } //============================================================================================================== // These structs are allocated on demand whenever the runtime tries to run a class constructor. Once the // the class constructor has been successfully initialized, we reclaim this structure. The structure is long- // lived only if the class constructor threw an exception. 
//============================================================================================================== private unsafe struct Cctor { public Lock Lock; public TypeInitializationException Exception; public int HoldingThread; private int _refCount; private StaticClassConstructionContext* _pContext; //========================================================================================================== // Gets the Cctor entry associated with a specific class constructor context (creating it if necessary.) //========================================================================================================== public static CctorHandle GetCctor(StaticClassConstructionContext* pContext) { #if DEBUG const int Grow = 2; #else const int Grow = 10; #endif // WASMTODO: Remove this when the Initialize method gets called by the runtime startup #if TARGET_WASM if (s_cctorGlobalLock == null) { Interlocked.CompareExchange(ref s_cctorGlobalLock, new Lock(), null); } if (s_cctorArrays == null) { Interlocked.CompareExchange(ref s_cctorArrays, new Cctor[10][], null); } #endif // TARGET_WASM using (LockHolder.Hold(s_cctorGlobalLock)) { Cctor[]? resultArray = null; int resultIndex = -1; if (s_count != 0) { // Search for the cctor context in our existing arrays for (int cctorIndex = 0; cctorIndex < s_cctorArraysCount; ++cctorIndex) { Cctor[] segment = s_cctorArrays[cctorIndex]; for (int i = 0; i < segment.Length; i++) { if (segment[i]._pContext == pContext) { resultArray = segment; resultIndex = i; break; } } if (resultArray != null) break; } } if (resultArray == null) { // look for an empty entry in an existing array for (int cctorIndex = 0; cctorIndex < s_cctorArraysCount; ++cctorIndex) { Cctor[] segment = s_cctorArrays[cctorIndex]; for (int i = 0; i < segment.Length; i++) { if (segment[i]._pContext == default(StaticClassConstructionContext*)) { resultArray = segment; resultIndex = i; break; } } if (resultArray != null) break; } if (resultArray == null) { // allocate a new array resultArray = new Cctor[Grow]; if (s_cctorArraysCount == s_cctorArrays.Length) { // grow the container Array.Resize(ref s_cctorArrays, (s_cctorArrays.Length * 2) + 1); } // store the array in the container, this cctor gets index 0 s_cctorArrays[s_cctorArraysCount] = resultArray; s_cctorArraysCount++; resultIndex = 0; } Debug.Assert(resultArray[resultIndex]._pContext == default(StaticClassConstructionContext*)); resultArray[resultIndex]._pContext = pContext; resultArray[resultIndex].Lock = new Lock(); s_count++; } Interlocked.Increment(ref resultArray[resultIndex]._refCount); return new CctorHandle(resultArray, resultIndex); } } public static int Count { get { Debug.Assert(s_cctorGlobalLock.IsAcquired); return s_count; } } public static void Release(CctorHandle cctor) { using (LockHolder.Hold(s_cctorGlobalLock)) { Cctor[] cctors = cctor.Array; int cctorIndex = cctor.Index; if (0 == Interlocked.Decrement(ref cctors[cctorIndex]._refCount)) { if (cctors[cctorIndex].Exception == null) { cctors[cctorIndex] = default; s_count--; } } } } } private struct CctorHandle { public CctorHandle(Cctor[] array, int index) { _array = array; _index = index; } public Cctor[] Array { get { return _array; } } public int Index { get { return _index; } } private Cctor[] _array; private int _index; } //============================================================================================================== // Keeps track of threads that are blocked on a cctor lock (alas, we don't have ThreadLocals here in // System.Private.CoreLib so we 
have to use a side table.) // // This is used for cross-thread deadlock detection. // // - Data is only entered here if a thread has been blocked past a certain timeout (otherwise, it's certainly // not participating of a deadlock.) // - Reads and writes to _blockingRecord are guarded by _cctorGlobalLock. // - BlockingRecords for individual threads are created on demand. Since this is a rare event, we won't attempt // to recycle them directly (however, // ManagedThreadId's are themselves recycled pretty quickly - and threads that inherit the managed id also // inherit the BlockingRecord.) //============================================================================================================== private struct BlockingRecord { public int ManagedThreadId; // ManagedThreadId of the blocked thread public CctorHandle BlockedOn; public static int MarkThreadAsBlocked(int managedThreadId, CctorHandle blockedOn) { #if DEBUG const int Grow = 2; #else const int Grow = 10; #endif using (LockHolder.Hold(s_cctorGlobalLock)) { if (s_blockingRecords == null) s_blockingRecords = new BlockingRecord[Grow]; int found; for (found = 0; found < s_nextBlockingRecordIndex; found++) { if (s_blockingRecords[found].ManagedThreadId == managedThreadId) break; } if (found == s_nextBlockingRecordIndex) { if (s_nextBlockingRecordIndex == s_blockingRecords.Length) { BlockingRecord[] newBlockingRecords = new BlockingRecord[s_blockingRecords.Length + Grow]; for (int i = 0; i < s_blockingRecords.Length; i++) { newBlockingRecords[i] = s_blockingRecords[i]; } s_blockingRecords = newBlockingRecords; } s_blockingRecords[s_nextBlockingRecordIndex].ManagedThreadId = managedThreadId; s_nextBlockingRecordIndex++; } s_blockingRecords[found].BlockedOn = blockedOn; return found; } } public static void UnmarkThreadAsBlocked(int blockRecordIndex) { // This method must never throw s_cctorGlobalLock.Acquire(); s_blockingRecords[blockRecordIndex].BlockedOn = new CctorHandle(null, 0); s_cctorGlobalLock.Release(); } public static CctorHandle GetCctorThatThreadIsBlockedOn(int managedThreadId) { Debug.Assert(s_cctorGlobalLock.IsAcquired); for (int i = 0; i < s_nextBlockingRecordIndex; i++) { if (s_blockingRecords[i].ManagedThreadId == managedThreadId) return s_blockingRecords[i].BlockedOn; } return new CctorHandle(null, 0); } private static BlockingRecord[] s_blockingRecords; private static int s_nextBlockingRecordIndex; } private static int CurrentManagedThreadId => ManagedThreadId.Current; private const int ManagedThreadIdNone = ManagedThreadId.IdNone; private static Lock s_cctorGlobalLock; // These three statics are used by ClassConstructorRunner.Cctor but moved out to avoid an unnecessary // extra class constructor call. // // Because Cctor's are mutable structs, we have to give our callers raw references to the underlying arrays // for this collection to be usable. This also means once we place a Cctor in an array, we can't grow or // reallocate the array. private static Cctor[][] s_cctorArrays; private static int s_cctorArraysCount; private static int s_count; // Eager construction called from LibraryInitialize Cctor.GetCctor uses _cctorGlobalLock. internal static void Initialize() { s_cctorArrays = new Cctor[10][]; s_cctorGlobalLock = new Lock(); } [Conditional("ENABLE_NOISY_CCTOR_LOG")] private static void NoisyLog(string format, IntPtr cctorMethod, int threadId) { // We cannot utilize any of the typical number formatting code because it triggers globalization code to run // and this cctor code is layered below globalization. 
#if DEBUG Debug.WriteLine(format, ToHexString(cctorMethod), ToHexString(threadId)); #endif // DEBUG } [Conditional("DEBUG")] private static void DebugLog(string format, IntPtr cctorMethod, int threadId) { // We cannot utilize any of the typical number formatting code because it triggers globalization code to run // and this cctor code is layered below globalization. #if DEBUG Debug.WriteLine(format, ToHexString(cctorMethod), ToHexString(threadId)); #endif } // We cannot utilize any of the typical number formatting code because it triggers globalization code to run // and this cctor code is layered below globalization. #if DEBUG private static string ToHexString(int num) { return ToHexStringUnsignedLong((ulong)num, false, 8); } private static string ToHexString(IntPtr num) { return ToHexStringUnsignedLong((ulong)num, false, 16); } private static char GetHexChar(uint u) { if (u < 10) return unchecked((char)('0' + u)); return unchecked((char)('a' + (u - 10))); } public static unsafe string ToHexStringUnsignedLong(ulong u, bool zeroPrepad, int numChars) { char[] chars = new char[numChars]; int i = numChars - 1; for (; i >= 0; i--) { chars[i] = GetHexChar((uint)(u % 16)); u = u / 16; if ((i == 0) || (!zeroPrepad && (u == 0))) break; } string str; fixed (char* p = &chars[i]) { str = new string(p, 0, numChars - i); } return str; } #endif } }
-1
dotnet/runtime
66,479
Add metrics for caching
Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the tracked size is not kept in sync with the count of CoherentState's backing concurrent dictionary, which means that a statistics snapshot taken while heavy multithreaded load is in progress may be out of sync; the values become consistent again once the heavy load has finished. (Refer to the last test case added.) TODO: - The PR is ready for review; in the meantime I am going to make sure we are not causing a regression in the TechEmpower benchmarks, and will share the results here. --- ### Alternative Approaches - If we wanted the snapshot values to be completely accurate, returning a count and size that are in sync, I also prototyped that change in https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that always remains in sync with `_cacheSize`. To keep things simple, I decided to go with the simpler fix drafted here in this PR, which just returns the estimated size. - Another approach that could be explored would be to use a thread-static object (e.g. a `ThreadStats` type that tracks count/size per thread); the actual count/size would then be the aggregate of the `ThreadStats` values across all threads. The disadvantage of this thread-static approach is that, because it keeps track of a list of threads, it needs additional code to clean up threads that have gone stale over time. We could explore this option later if we learn that the current solution in the PR is not sufficient or performant enough.
maryamariyan
2022-03-10T21:30:00Z
2022-04-08T18:16:17Z
51afa067be9f0d00dac2c455ceb78e38eef1e239
cef4ae17d91c278880f45f017e4272afe1f379b3
Add metrics for caching. Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the tracked size is not kept in sync with the count of CoherentState's backing concurrent dictionary, which means that a statistics snapshot taken while heavy multithreaded load is in progress may be out of sync; the values become consistent again once the heavy load has finished. (Refer to the last test case added.) TODO: - The PR is ready for review; in the meantime I am going to make sure we are not causing a regression in the TechEmpower benchmarks, and will share the results here. --- ### Alternative Approaches - If we wanted the snapshot values to be completely accurate, returning a count and size that are in sync, I also prototyped that change in https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that always remains in sync with `_cacheSize`. To keep things simple, I decided to go with the simpler fix drafted here in this PR, which just returns the estimated size. - Another approach that could be explored would be to use a thread-static object (e.g. a `ThreadStats` type that tracks count/size per thread); the actual count/size would then be the aggregate of the `ThreadStats` values across all threads. The disadvantage of this thread-static approach is that, because it keeps track of a list of threads, it needs additional code to clean up threads that have gone stale over time. We could explore this option later if we learn that the current solution in the PR is not sufficient or performant enough.
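The thread-static alternative above is described only in prose. The sketch below is purely hypothetical: `ThreadStats`, `CacheStatsAggregator`, and the method names are invented for illustration and do not exist in the PR. It only shows the aggregation idea and why a registry of threads (and therefore stale-thread cleanup) becomes necessary.

```c#
// Hypothetical sketch of the thread-static aggregation approach described above.
// None of these types exist in the PR; they illustrate the idea of per-thread
// counters that are summed on demand to produce a snapshot.
using System;
using System.Collections.Concurrent;
using System.Threading;

internal sealed class ThreadStats
{
    public long Count;
    public long Size;
}

internal static class CacheStatsAggregator
{
    // One ThreadStats per thread. The registry is needed so Snapshot() can sum
    // across all threads, which is also what forces extra bookkeeping for
    // threads that have exited ("gone stale").
    private static readonly ConcurrentDictionary<int, ThreadStats> s_allThreads = new();

    [ThreadStatic]
    private static ThreadStats? t_stats;

    private static ThreadStats GetOrCreate() =>
        t_stats ??= s_allThreads.GetOrAdd(Environment.CurrentManagedThreadId, _ => new ThreadStats());

    public static void OnEntryAdded(long size)
    {
        ThreadStats stats = GetOrCreate();
        Interlocked.Increment(ref stats.Count);
        Interlocked.Add(ref stats.Size, size);
    }

    public static void OnEntryRemoved(long size)
    {
        ThreadStats stats = GetOrCreate();
        Interlocked.Decrement(ref stats.Count);
        Interlocked.Add(ref stats.Size, -size);
    }

    public static (long Count, long Size) Snapshot()
    {
        // Individual per-thread values may go negative (an entry added on one
        // thread can be removed on another); only the aggregate is meaningful.
        long count = 0, size = 0;
        foreach (ThreadStats stats in s_allThreads.Values)
        {
            count += Interlocked.Read(ref stats.Count);
            size += Interlocked.Read(ref stats.Size);
        }
        return (count, size);
    }
}
```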
./src/libraries/tests.proj
<Project Sdk="Microsoft.Build.Traversal"> <PropertyGroup Condition="'$(BuildAllConfigurations)' != 'true'"> <!-- Build for NetCoreAppCurrent by default if no BuildTargetFramework is supplied or if not all configurations are built. --> <TargetFramework>$([MSBuild]::ValueOrDefault('$(BuildTargetFramework)', '$(NetCoreAppCurrent)'))-$(TargetOS)</TargetFramework> <!-- Filter ProjectReferences to build the best matching target framework only. --> <FilterTraversalProjectReferences>true</FilterTraversalProjectReferences> </PropertyGroup> <PropertyGroup> <TestInParallel Condition="'$(Coverage)' == 'true'">false</TestInParallel> <!-- For tests we want to continue running if a test run failed. --> <TestContinueOnError>ErrorAndContinue</TestContinueOnError> <TraversalGlobalProperties>BuildAllProjects=true</TraversalGlobalProperties> <CoverageReportInputPath>$(ArtifactsBinDir)\*.Tests\**\coverage.opencover.xml</CoverageReportInputPath> <CoverageReportDir>$(ArtifactsDir)coverage</CoverageReportDir> <EnableCoverageSupport>true</EnableCoverageSupport> <TestAssemblies Condition="'$(TestAssemblies)' == ''">true</TestAssemblies> <TestPackages Condition="'$(TestPackages)' == ''">false</TestPackages> <TestTrimming Condition="'$(TestTrimming)' == ''">false</TestTrimming> <RunHighAOTResourceRequiringTestsOnly Condition="'$(RunHighAOTResourceRequiringTestsOnly)' == ''">false</RunHighAOTResourceRequiringTestsOnly> <!-- Don't build samples, and functional tests on EAT, AOT, WBT, and Debugger lanes --> <RunWasmSamples Condition="'$(RunSmokeTestsOnly)' != 'true' and '$(RunHighAOTResourceRequiringTestsOnly)' != 'true' and '$(TargetOS)' == 'Browser' and '$(BuildAOTTestsOnHelix)' != 'true' and '$(TestWasmBuildTests)' != 'true' and '$(TestWasmDebuggerTests)' != 'true'">true</RunWasmSamples> </PropertyGroup> <ItemGroup Condition="'$(TargetOS)' == 'Browser'"> <!-- https://github.com/dotnet/runtime/issues/65356 - OOM while linking --> <HighAOTResourceRequiringProject Include="$(MSBuildThisFileDirectory)System.Text.Json\tests\System.Text.Json.SourceGeneration.Tests\System.Text.Json.SourceGeneration.Roslyn3.11.Tests.csproj" /> <!-- https://github.com/dotnet/runtime/issues/65411 - possible OOM when compiling System.Text.Json.SourceGeneration.Roslyn4.0.Tests.dll.bc -> .o --> <HighAOTResourceRequiringProject Include="$(MSBuildThisFileDirectory)System.Text.Json\tests\System.Text.Json.SourceGeneration.Tests\System.Text.Json.SourceGeneration.Roslyn4.0.Tests.csproj" /> <!-- https://github.com/dotnet/runtime/issues/61524 - OOM while linking --> <HighAOTResourceRequiringProject Include="$(MSBuildThisFileDirectory)System.Text.Json\tests\System.Text.Json.Tests\System.Text.Json.Tests.csproj" /> <!-- https://github.com/dotnet/runtime/issues/66647 --> <HighAOTResourceRequiringProject Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Logging.Abstractions\tests\Microsoft.Extensions.Logging.Generators.Tests\Microsoft.Extensions.Logging.Generators.Roslyn3.11.Tests.csproj" /> </ItemGroup> <!-- Samples which are too complex for CI --> <ItemGroup Condition="'$(TargetOS)' == 'Browser'"> <ProjectExclusions Include="$(MonoProjectRoot)sample\wasm\console-node-ts\Wasm.Console.Node.TS.Sample.csproj" /> <ProjectExclusions Include="$(MonoProjectRoot)sample\wasm\browser-webpack\Wasm.Browser.WebPack.Sample.csproj" /> <ProjectExclusions Include="$(MonoProjectRoot)sample\wasm\browser-nextjs\Wasm.Browser.NextJs.Sample.csproj" /> <!-- These tests are completely disabled on wasm --> <ProjectExclusions 
Include="$(MSBuildThisFileDirectory)System.Text.RegularExpressions/tests/System.Text.RegularExpressions.Generators.Tests/System.Text.RegularExpressions.Generators.Tests.csproj" /> </ItemGroup> <!-- Wasm aot on all platforms --> <ItemGroup Condition="'$(TargetOS)' == 'Browser' and '$(BuildAOTTestsOnHelix)' == 'true' and '$(RunDisabledWasmTests)' != 'true' and '$(RunAOTCompilation)' == 'true'"> <!-- https://github.com/dotnet/runtime/issues/66118 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Text.RegularExpressions\tests\UnitTests\System.Text.RegularExpressions.Unit.Tests.csproj" /> <!-- https://github.com/dotnet/runtime/issues/61756 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Text.RegularExpressions\tests\FunctionalTests\System.Text.RegularExpressions.Tests.csproj" /> </ItemGroup> <!-- Projects that don't support code coverage measurement. --> <ItemGroup Condition="'$(Coverage)' == 'true'"> <ProjectExclusions Include="$(CommonTestPath)Common.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.XmlSerializer.Generator\tests\Microsoft.XmlSerializer.Generator.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Transactions.Local\tests\System.Transactions.Local.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetArchitecture)' == 'ARMv6'"> <!-- https://github.com/dotnet/runtime/issues/64673 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.Ping\tests\FunctionalTests\System.Net.Ping.Functional.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetsMobile)' == 'true' or '$(TargetArchitecture)' == 'ARMv6'"> <!-- LibraryImportGenerator runtime tests depend on DNNE, which does not support mobile platforms. --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.InteropServices\tests\LibraryImportGenerator.Tests\LibraryImportGenerator.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.InteropServices\tests\LibraryImportGenerator.UnitTests\LibraryImportGenerator.Unit.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'windows' and '$(TargetArchitecture)' == 'arm'"> <!-- LibraryImportGenerator runtime tests depend on DNNE, which does not support Windows ARM32 as we don't officially support it. --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.InteropServices\tests\LibraryImportGenerator.Tests\LibraryImportGenerator.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetArchitecture)' == 'armel'"> <!-- LibraryImportGenerator runtime tests depend on DNNE, which does not support armel as we don't officially support it. --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.InteropServices\tests\LibraryImportGenerator.Tests\LibraryImportGenerator.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetArchitecture)' == 'arm'"> <!-- Issue: https://github.com/dotnet/runtime/issues/60705 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.InteropServices\tests\LibraryImportGenerator.UnitTests\LibraryImportGenerator.Unit.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'FreeBSD'"> <!-- LibraryImportGenerator runtime tests build depends pulling down a pre-built nethost binary, which is not available for FreeBSD. 
--> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.InteropServices\tests\LibraryImportGenerator.Tests\LibraryImportGenerator.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'linux' and '$(TargetArchitecture)' == 's390x'"> <!-- LibraryImportGenerator runtime tests build depends pulling down a pre-built nethost binary, which is not available for s390x. --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.InteropServices\tests\LibraryImportGenerator.Tests\LibraryImportGenerator.Tests.csproj" /> <!-- LibraryImportGenerator unit tests fail since NuGet 5.6.0 signature verification does not work on big-endian systems (needs >= 5.11.0). --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.InteropServices\tests\LibraryImportGenerator.UnitTests\LibraryImportGenerator.Unit.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'Windows' and '$(RuntimeFlavor)' == 'Mono' and '$(RunDisabledMonoTestsOnWindows)' != 'true'"> <!-- Issue: https://github.com/dotnet/runtime/issues/53281 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.WebSockets.Client\tests\System.Net.WebSockets.Client.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.InteropServices\tests\LibraryImportGenerator.UnitTests\LibraryImportGenerator.Unit.Tests.csproj" /> <!-- Issue: https://github.com/dotnet/runtime/issues/63723 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Drawing.Common\tests\System.Drawing.Common.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' != 'Windows'"> <!-- windows specific tests --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Hosting.WindowsServices/tests/Microsoft.Extensions.Hosting.WindowsServices.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'Android'"> <!-- Never going to run on Android --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.OpenSsl\tests\System.Security.Cryptography.OpenSsl.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'Android' and '$(RunDisabledAndroidTests)' != 'true'"> <!-- Tests time out intermittently --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Hosting\tests\UnitTests\Microsoft.Extensions.Hosting.Unit.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.Security\tests\FunctionalTests\System.Net.Security.Tests.csproj" /> <!-- Tests crash --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Globalization\tests\Invariant\Invariant.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.IO.FileSystem\tests\System.IO.FileSystem.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.IO.FileSystem.Watcher\tests\System.IO.FileSystem.Watcher.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.IO.Ports\tests\System.IO.Ports.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.Quic\tests\FunctionalTests\System.Net.Quic.Functional.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Reflection\tests\System.Reflection.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.Serialization.Formatters\tests\System.Runtime.Serialization.Formatters.Tests.csproj" /> <ProjectExclusions 
Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.Algorithms\tests\System.Security.Cryptography.Algorithms.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.Csp\tests\System.Security.Cryptography.Csp.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Threading.Thread\tests\System.Threading.Thread.Tests.csproj" /> <!-- Actual test failures --> <!-- https://github.com/dotnet/runtime/issues/50871 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.DependencyInjection\tests\DI.Tests\Microsoft.Extensions.DependencyInjection.Tests.csproj" /> <!-- https://github.com/dotnet/runtime/issues/50874 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Logging.EventSource\tests\Microsoft.Extensions.Logging.EventSource.Tests.csproj" /> <!-- https://github.com/dotnet/runtime/issues/50923 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Data.Common\tests\System.Data.Common.Tests.csproj" /> <!-- https://github.com/dotnet/runtime/issues/50926 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Diagnostics.Tracing\tests\System.Diagnostics.Tracing.Tests.csproj" /> <!-- https://github.com/dotnet/runtime/issues/49936 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Text.Json\tests\System.Text.Json.Tests\System.Text.Json.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.Http\tests\FunctionalTests\System.Net.Http.Functional.Tests.csproj" /> <!-- Execution may be compromised --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Caching.Memory\tests\Microsoft.Extensions.Caching.Memory.Tests.csproj" /> <!-- PSNE --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Console/tests/System.Console.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.Primitives/tests/FunctionalTests/System.Net.Primitives.Functional.Tests.csproj" /> <!-- Crashes on CI (possibly flakey) --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Private.Xml/tests/Misc/System.Xml.Misc.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Dynamic.Runtime/tests/System.Dynamic.Runtime.Tests.csproj"/> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'Android' and '$(TargetArchitecture)' == 'arm64' and '$(RunDisabledAndroidTests)' != 'true'"> <!-- Crashes on CI (possibly flakey) https://github.com/dotnet/runtime/issues/52615 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Configuration/tests/FunctionalTests/Microsoft.Extensions.Configuration.Functional.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Configuration.Binder/tests/Microsoft.Extensions.Configuration.Binder.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.FileProviders.Physical/tests/Microsoft.Extensions.FileProviders.Physical.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Win32.Primitives/tests/Microsoft.Win32.Primitives.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Collections.Concurrent/tests/System.Collections.Concurrent.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Collections.Specialized/tests/System.Collections.Specialized.Tests.csproj" /> <ProjectExclusions 
Include="$(MSBuildThisFileDirectory)System.ComponentModel.Annotations/tests/System.ComponentModel.Annotations.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Composition.Hosting/tests/System.Composition.Hosting.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.IO.FileSystem.Primitives/tests/System.IO.FileSystem.Primitives.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Memory/tests/System.Memory.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.Mail/tests/Functional/System.Net.Mail.Functional.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.NameResolution/tests/PalTests/System.Net.NameResolution.Pal.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.WebClient/tests/System.Net.WebClient.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Numerics.Tensors/tests/System.Numerics.Tensors.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Private.Xml/tests/XmlReader/Tests/System.Xml.RW.XmlReader.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Private.Xml/tests/XPath/XPathDocument/System.Xml.XPath.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Private.Uri/tests/ExtendedFunctionalTests/System.Private.Uri.ExtendedFunctional.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Reflection.DispatchProxy/tests/System.Reflection.DispatchProxy.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'Android' and '$(TargetArchitecture)' == 'x64' and '$(RunDisabledAndroidTests)' != 'true'"> <!-- Test flakiness on x64 https://github.com/dotnet/runtime/issues/49937 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Threading\tests\System.Threading.Tests.csproj" /> <!-- Out of memory https://github.com/dotnet/runtime/issues/62547 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography\tests\System.Security.Cryptography.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'Android' and '$(TargetArchitecture)' == 'x86' and '$(RunDisabledAndroidTests)' != 'true'"> <!-- Crashes only on x86 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Primitives\tests\Microsoft.Extensions.Primitives.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.Extensions\tests\System.Runtime.Extensions.Tests.csproj" /> <!-- Out of memory https://github.com/dotnet/runtime/issues/62547 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography\tests\System.Security.Cryptography.Tests.csproj" /> <!-- https://github.com/dotnet/runtime/issues/50493 --> <ProjectExclusions Include="$(RepoRoot)\src\tests\FunctionalTests\Android\Device_Emulator\AOT\Android.Device_Emulator.Aot.Test.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'iOS' and '$(RunDisablediOSTests)' != 'true'"> <ProjectExclusions Include="$(MSBuildThisFileDirectory)*\tests\**\*.Tests.csproj" /> <!-- Ref.Emit in XSLCompiledTransform --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Private.Xml\tests\Xslt\XslCompiledTransformApi\System.Xml.Xsl.XslCompiledTransformApi.Tests.csproj" /> <!-- Functional tests on devices have problems with return codes from mlaunch --> <ProjectExclusions 
Include="$(RepoRoot)\src\tests\FunctionalTests\$(TargetOS)\Device\**\*.Test.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'iOSSimulator' and '$(TargetArchitecture)' == 'arm64' and '$(RunDisablediOSTests)' != 'true'"> <!-- Functional tests on arm64 simulator have problems with return codes from mlaunch --> <ProjectExclusions Include="$(RepoRoot)\src\tests\FunctionalTests\iOS\Simulator\**\*.Test.csproj" /> </ItemGroup> <!-- Excluding all tests for devices until building on helix works properly --> <ItemGroup Condition="'$(TargetOS)' == 'tvOS' and '$(RunDisablediOSTests)' != 'true'"> <!-- Ref.Emit in XSLCompiledTransform --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Private.Xml\tests\Xslt\XslCompiledTransformApi\System.Xml.Xsl.XslCompiledTransformApi.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Reflection\tests\System.Reflection.Emit.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Reflection.Emit.ILGeneration/tests/System.Reflection.Emit.ILGeneration.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Reflection.Emit.Lightweight/tests/System.Reflection.Emit.Lightweight.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Reflection.DispatchProxy/tests/System.Reflection.DispatchProxy.Tests.csproj" /> <!-- Has deps that JIT, need re-done in order to pass --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Hosting/tests/UnitTests/Microsoft.Extensions.Hosting.Unit.Tests.csproj" /> <!-- Test suites hang and time out. https://github.com/dotnet/runtime/issues/60713 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.DependencyInjection/tests/DI.External.Tests/Microsoft.Extensions.DependencyInjection.ExternalContainers.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.NetworkInformation/tests/FunctionalTests/System.Net.NetworkInformation.Functional.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Reflection/tests/System.Reflection.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Threading.Channels/tests/System.Threading.Channels.Tests.csproj" /> <!-- Functional tests on devices have problems with return codes from mlaunch --> <ProjectExclusions Include="$(RepoRoot)\src\tests\FunctionalTests\$(TargetOS)\Device\**\*.Test.csproj" /> </ItemGroup> <!-- Excluding all tests for aot catalyst until building on helix works properly --> <ItemGroup Condition="('$(TargetOS)' == 'MacCatalyst' and '$(BuildTestsOnHelix)' == 'true') and '$(RunDisablediOSTests)' != 'true'"> <ProjectExclusions Include="$(MSBuildThisFileDirectory)*\tests\**\*.Tests.csproj" /> <!-- No functional tests until helix stabilizes --> <ProjectExclusions Include="$(RepoRoot)\src\tests\FunctionalTests\iOS\Simulator\**\*.Test.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'MacCatalyst'"> <ProjectExclusions Include="$(RepoRoot)/src/tests/FunctionalTests/iOS/Simulator/XmlFormatWriterGeneratorAOT/iOS.Simulator.XmlFormatWriterGeneratorAot.Test.csproj" /> </ItemGroup> <!-- Run only explicitly selected tests for Mac Catalyst in App Sandbox --> <ItemGroup Condition="'$(TargetOS)' == 'MacCatalyst' and '$(EnableAppSandbox)' == 'true'"> <ProjectExclusions Include="$(MSBuildThisFileDirectory)*/tests/**/*.Tests.csproj" /> <ProjectExclusions Include="$(RepoRoot)/src/tests/FunctionalTests/iOS/Simulator/**/*.Test.csproj" /> <!-- 
https://github.com/dotnet/runtime/pull/61507 --> <ProjectExclusions Remove="$(MSBuildThisFileDirectory)System.Diagnostics.Process/tests/System.Diagnostics.Process.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="('$(TargetOS)' == 'iOS' or '$(TargetOS)' == 'iOSSimulator' or '$(TargetOS)' == 'tvOS' or '$(TargetOS)' == 'tvOSSimulator' or '$(TargetOS)' == 'MacCatalyst') and '$(RunDisablediOSTests)' != 'true'"> <!-- PNSE --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.Quic/tests/FunctionalTests/System.Net.Quic.Functional.Tests.csproj" /> <!-- https://github.com/dotnet/runtime/issues/51414 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.OpenSsl/tests/System.Security.Cryptography.OpenSsl.Tests.csproj" /> <!-- App Crash https://github.com/dotnet/runtime/issues/53624 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.CSharp/tests/Microsoft.CSharp.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Diagnostics.Tracing/tests/System.Diagnostics.Tracing.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Dynamic.Runtime/tests/System.Dynamic.Runtime.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Linq.Expressions/tests/System.Linq.Expressions.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.Requests/tests/System.Net.Requests.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.Security/tests/FunctionalTests/System.Net.Security.Tests.csproj" /> <ProjectExclusions Include="$(RepoRoot)/src/tests/FunctionalTests/iOS/Simulator/PInvoke/iOS.Simulator.PInvoke.Test.csproj" /> <ProjectExclusions Include="$(RepoRoot)/src/tests/FunctionalTests/tvOS/Simulator/AOT/tvOS.Simulator.Aot.Test.csproj" /> </ItemGroup> <ItemGroup Condition="('$(TargetOS)' == 'iOS' or '$(TargetOS)' == 'iOSSimulator' or '$(TargetOS)' == 'tvOS' or '$(TargetOS)' == 'tvOSSimulator') and '$(RunDisablediOSTests)' != 'true'"> <!-- https://github.com/dotnet/runtime/issues/51335 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.IO.Pipes/tests/System.IO.Pipes.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'Browser' and '$(RunDisabledWasmTests)' != 'true' and '$(RunAOTCompilation)' != 'true'"> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'Browser' and '$(RunDisabledWasmTests)' != 'true'"> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.NETCore.Platforms\tests\Microsoft.NETCore.Platforms.Tests.csproj" /> <!-- This test is disabled via an assembly-level attribute in source. We exclude it here to avoid queuing/running a work item entirely. https://github.com/dotnet/runtime/issues/35970 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Caching.Memory\tests\Microsoft.Extensions.Caching.Memory.Tests.csproj" /> <!-- This test is disabled via an assembly-level attribute in source. We exclude it here to avoid queuing/running a work item entirely. https://github.com/mono/mono/issues/16417 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.ComponentModel.Composition\tests\System.ComponentModel.Composition.Tests.csproj" /> <!-- Mono-Browser ignores runtimeconfig.template.json (e.g. 
for this it has "System.Globalization.EnforceJapaneseEraYearRanges": true) --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Globalization.Calendars\tests\CalendarTestWithConfigSwitch\System.Globalization.CalendarsWithConfigSwitch.Tests.csproj" /> <!-- https://github.com/dotnet/runtime/issues/37669 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.DependencyModel\tests\Microsoft.Extensions.DependencyModel.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Hosting\tests\UnitTests\Microsoft.Extensions.Hosting.Unit.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.Csp\tests\System.Security.Cryptography.Csp.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.Encoding\tests\System.Security.Cryptography.Encoding.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.OpenSsl\tests\System.Security.Cryptography.OpenSsl.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.Pkcs\tests\System.Security.Cryptography.Pkcs.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.Primitives\tests\System.Security.Cryptography.Primitives.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.Xml\tests\System.Security.Cryptography.Xml.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.X509Certificates\tests\System.Security.Cryptography.X509Certificates.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.Cose\tests\System.Security.Cryptography.Cose.Tests.csproj" /> <!-- This OuterLoop test requires browser UI, but the Helix agents are headless --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.WebSockets.Client\tests\wasm\System.Net.WebSockets.Client.Wasm.Tests.csproj" /> </ItemGroup> <!-- Aggressive Trimming related failures --> <ItemGroup Condition="('$(TargetOS)' != 'Browser' and '$(RunAOTCompilation)' == 'true' and '$(MonoForceInterpreter)' != 'true') or ('$(TargetOS)' == 'Browser' and '$(BuildAOTTestsOnHelix)' == 'true' and '$(RunDisabledWasmTests)' != 'true')"> <!-- Issue: https://github.com/dotnet/runtime/issues/59926 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.Serialization.Xml\tests\System.Runtime.Serialization.Xml.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="('$(TargetOS)' != 'Browser' and '$(RunAOTCompilation)' == 'true' and '$(MonoForceInterpreter)' != 'true') or ('$(TargetOS)' == 'Browser' and '$(BuildAOTTestsOnHelix)' == 'true' and '$(RunDisabledWasmTests)' != 'true') or ('$(TargetOS)' == 'iOS' and '$(BuildTestsOnHelix)' == 'true')"> <!-- Issue: https://github.com/dotnet/runtime/issues/50724 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Composition\tests\System.Composition.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Dynamic.Runtime\tests\System.Dynamic.Runtime.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.DependencyInjection\tests\DI.Tests\Microsoft.Extensions.DependencyInjection.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Logging.EventSource\tests\Microsoft.Extensions.Logging.EventSource.Tests.csproj" /> <ProjectExclusions 
Include="$(MSBuildThisFileDirectory)System.Diagnostics.DiagnosticSource\tests\System.Diagnostics.DiagnosticSource.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Diagnostics.Tracing/tests/System.Diagnostics.Tracing.Tests.csproj" /> <!-- Issue: https://github.com/dotnet/runtime/issues/51708 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Linq.Expressions\tests\System.Linq.Expressions.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'Browser' and '$(BrowserHost)' == 'windows' and '$(Scenario)' == 'WasmTestOnBrowser' and '$(RunDisabledWasmTestsOnWindows)' != 'true'"> <!-- Issue: https://github.com/dotnet/runtime/issues/55429 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.VisualBasic.Core\tests\Microsoft.VisualBasic.Core.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.IO\tests\System.IO.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Linq.Expressions\tests\System.Linq.Expressions.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Memory\tests\System.Memory.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime\tests\System.Runtime.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Private.Xml\tests\Writers\XmlWriterApi\System.Xml.RW.XmlWriterApi.Tests.csproj" /> <ProjectExclusions Include="$(RepoRoot)\src\tests\BuildWasmApps\Wasm.Build.Tests\Wasm.Build.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'OSX' and '$(TargetArchitecture)' == 'arm64' and '$(RunDisabledAppleSiliconTests)' != 'true'"> <!-- ActiveIssue Apple Silicon No usable version of libssl was found https://github.com/dotnet/runtime/issues/49083 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.Algorithms/tests/System.Security.Cryptography.Algorithms.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.OpenSsl/tests/System.Security.Cryptography.OpenSsl.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetArchitecture)' == 's390x' and '$(RunDisableds390xTests)' != 'true'"> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Drawing.Common\tests\System.Drawing.Common.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TestSingleFile)' == 'true' and '$(TestNativeAot)' != 'true'"> <!-- Run only a small randomly chosen set of passing test suites --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)*\tests\**\*.Tests.csproj" /> <ProjectExclusions Remove="$(MSBuildThisFileDirectory)System.Collections\tests\System.Collections.Tests.csproj" /> <ProjectExclusions Remove="$(MSBuildThisFileDirectory)System.IO.IsolatedStorage\tests\System.IO.IsolatedStorage.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TestNativeAot)' == 'true'"> <!-- Run only a small randomly chosen set of passing test suites --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)*\tests\**\*.Tests.csproj" /> <ProjectExclusions Remove="$(MSBuildThisFileDirectory)System.Collections\tests\System.Collections.Tests.csproj" /> </ItemGroup> <ItemGroup> <ProjectExclusions Condition="'$(RunHighAOTResourceRequiringTestsOnly)' != 'true'" Include="@(HighAOTResourceRequiringProject)" /> </ItemGroup> <ItemGroup> <ProjectReference Condition="'$(RunSmokeTestsOnly)' == 'true'" Include="$(MSBuildThisFileDirectory)System.Runtime\tests\System.Runtime.Tests.csproj" /> <ProjectReference 
Condition="'$(RunHighAOTResourceRequiringTestsOnly)' == 'true'" Include="@(HighAOTResourceRequiringProject)" BuildInParallel="false" /> </ItemGroup> <ItemGroup Condition="'$(RunSmokeTestsOnly)' != 'true' and '$(RunHighAOTResourceRequiringTestsOnly)' != 'true'"> <ProjectReference Include="$(MSBuildThisFileDirectory)*\tests\**\*.Tests.csproj" Exclude="@(ProjectExclusions)" Condition="'$(TestAssemblies)' == 'true'" /> <ProjectReference Include="$(MSBuildThisFileDirectory)testPackages\testPackages.proj" Condition="'$(TestPackages)' == 'true'" /> <TrimmingTestProjects Include="$(MSBuildThisFileDirectory)*\tests\**\*.TrimmingTests.proj" Exclude="@(ProjectExclusions)" Condition="'$(TestTrimming)' == 'true'" AdditionalProperties="%(AdditionalProperties);SkipTrimmingProjectsRestore=true" /> <ProjectReference Include="@(TrimmingTestProjects)" /> <!-- wasm.build.tests are run on _WasmBuildTests job on CI, and with library tests locally. --> <ProjectReference Include="$(RepoRoot)\src\tests\BuildWasmApps\Wasm.Build.Tests\*.Tests.csproj" Exclude="@(ProjectExclusions)" Condition="'$(TargetOS)' == 'Browser' and (('$(ContinuousIntegrationBuild)' == 'true' and '$(TestWasmBuildTests)' == 'true') or ('$(ContinuousIntegrationBuild)' != 'true' and '$(TestAssemblies)' == 'true'))" BuildInParallel="false" /> <ProjectReference Include="$(RepoRoot)\src\tests\BuildWasmApps\Wasm.Debugger.Tests\*.Tests.csproj" Exclude="@(ProjectExclusions)" Condition="'$(TargetOS)' == 'Browser' and (('$(ContinuousIntegrationBuild)' == 'true' and '$(TestWasmDebuggerTests)' == 'true') or ('$(ContinuousIntegrationBuild)' != 'true' and '$(TestAssemblies)' == 'true'))" BuildInParallel="false" /> </ItemGroup> <ItemGroup Condition="'$(ArchiveTests)' == 'true' and '$(TargetOS)' == 'iOS'"> <!-- Only System.Runtime tests on iOS for now --> <ProjectReference Include="$(MSBuildThisFileDirectory)System.Runtime\tests\System.Runtime.Tests.csproj" /> <ProjectReference Include="$(RepoRoot)\src\tests\FunctionalTests\iOS\Device\**\*.Test.csproj" Exclude="@(ProjectExclusions)" BuildInParallel="false" /> </ItemGroup> <ItemGroup Condition="'$(ArchiveTests)' == 'true' and '$(RunSmokeTestsOnly)' != 'true' and '$(TargetOS)' == 'iOSSimulator'"> <!-- https://github.com/dotnet/runtime/issues/57666 --> <!-- <ProjectReference Include="$(MonoProjectRoot)sample\iOS\Program.csproj" BuildInParallel="false" /> --> <ProjectReference Include="$(RepoRoot)\src\tests\FunctionalTests\iOS\Simulator\**\*.Test.csproj" Exclude="@(ProjectExclusions)" BuildInParallel="false" /> </ItemGroup> <ItemGroup Condition="'$(ArchiveTests)' == 'true' and '$(RunSmokeTestsOnly)' != 'true' and '$(TargetOS)' == 'tvOS'"> <ProjectReference Include="$(RepoRoot)\src\tests\FunctionalTests\tvOS\Device\**\*.Test.csproj" Exclude="@(ProjectExclusions)" BuildInParallel="false" /> </ItemGroup> <ItemGroup Condition="'$(ArchiveTests)' == 'true' and '$(RunSmokeTestsOnly)' != 'true' and '$(TargetOS)' == 'MacCatalyst'"> <!-- https://github.com/dotnet/runtime/issues/57666 --> <!-- <ProjectReference Include="$(MonoProjectRoot)sample\iOS\Program.csproj" BuildInParallel="false" /> --> <ProjectReference Include="$(RepoRoot)\src\tests\FunctionalTests\iOS\Simulator\**\*.Test.csproj" Exclude="@(ProjectExclusions)" BuildInParallel="false" /> </ItemGroup> <ItemGroup Condition="'$(ArchiveTests)' == 'true' and '$(RunSmokeTestsOnly)' != 'true' and '$(TargetOS)' == 'tvOSSimulator'"> <ProjectReference Include="$(RepoRoot)\src\tests\FunctionalTests\tvOS\Simulator\**\*.Test.csproj" Exclude="@(ProjectExclusions)" 
BuildInParallel="false" /> </ItemGroup> <ItemGroup Condition="'$(ArchiveTests)' == 'true' and '$(RunSmokeTestsOnly)' != 'true' and '$(TargetOS)' == 'Android'"> <ProjectReference Include="$(MonoProjectRoot)sample\Android\AndroidSampleApp.csproj" BuildInParallel="false" /> <ProjectReference Include="$(RepoRoot)\src\tests\FunctionalTests\Android\**\*.Test.csproj" Exclude="@(ProjectExclusions)" BuildInParallel="false" /> </ItemGroup> <PropertyGroup> <Samples_BuildInParallel Condition="'$(OS)' == 'Windows_NT'">false</Samples_BuildInParallel> <Samples_BuildInParallel Condition="'$(OS)' != 'Windows_NT'">true</Samples_BuildInParallel> </PropertyGroup> <ItemGroup Condition="'$(ArchiveTests)' == 'true' and '$(RunWasmSamples)' == 'true'"> <ProjectReference Include="$(MonoProjectRoot)sample\wasm\**\*.Sample.csproj" Exclude="@(ProjectExclusions)" BuildInParallel="$(Samples_BuildInParallel)" /> <ProjectReference Include="$(RepoRoot)\src\tests\FunctionalTests\WebAssembly\**\*.Test.csproj" Exclude="@(ProjectExclusions)" BuildInParallel="false" /> </ItemGroup> <Target Name="GenerateMergedCoverageReport" AfterTargets="Test" DependsOnTargets="GenerateCoverageReport" Condition="'$(TestAssemblies)' == 'true' and '$(Coverage)' == 'true'" /> <!-- Build Apple app bundles using AppBundleRoot --> <UsingTask Condition="'$(UseAppBundleRootForBuildingTests)' == 'true'" TaskName="XcodeCreateProject" AssemblyFile="$(AppleAppBuilderTasksAssemblyPath)" /> <UsingTask Condition="'$(UseAppBundleRootForBuildingTests)' == 'true'" TaskName="XcodeBuildApp" AssemblyFile="$(AppleAppBuilderTasksAssemblyPath)" /> <Target Condition="'$(UseAppBundleRootForBuildingTests)' == 'true'" Name="BuildAppleAppBundles" AfterTargets="Build"> <PropertyGroup> <!-- TODO: Unify this with TestArchiveTestsRoot in src/libraries/Directory.Build.props somehow, we can't use IsFunctionalTest==true here because it is only set in the context of the .csproj --> <TestArchiveNormalTestsRoot>$(TestArchiveRoot)tests/</TestArchiveNormalTestsRoot> <TestArchiveFunctionalTestsRoot>$(TestArchiveRoot)runonly/</TestArchiveFunctionalTestsRoot> <TestArchiveNormalTestsDir>$(TestArchiveNormalTestsRoot)$(OSPlatformConfig)/</TestArchiveNormalTestsDir> <TestArchiveFunctionalTestsDir>$(TestArchiveFunctionalTestsRoot)$(OSPlatformConfig)/</TestArchiveFunctionalTestsDir> <NormalTestsAppBundleRoot>$(AppBundleRoot)/tests/</NormalTestsAppBundleRoot> <FunctionalTestsAppBundleRoot>$(AppBundleRoot)/runonly/</FunctionalTestsAppBundleRoot> <NormalTestsAllAppBundlesRoot>$(AppBundleRoot)/tests.all/</NormalTestsAllAppBundlesRoot> <FunctionalTestsAllAppBundlesRoot>$(AppBundleRoot)/runonly.all/</FunctionalTestsAllAppBundlesRoot> </PropertyGroup> <ItemGroup> <NormalTestAppBundles Include="$(NormalTestsAppBundleRoot)*/AppBundle/CMakeLists.txt" /> <NormalTestCMakeEntries Include="cmake_minimum_required(VERSION 3.16)" /> <NormalTestCMakeEntries Include="project(NormalTestAppBundles)" /> <NormalTestCMakeEntries Include="add_subdirectory(%(NormalTestAppBundles.RootDir)%(NormalTestAppBundles.Directory) %(NormalTestAppBundles.RecursiveDir) EXCLUDE_FROM_ALL)" /> <FunctionalTestAppBundles Include="$(FunctionalTestsAppBundleRoot)*/AppBundle/CMakeLists.txt" /> <FunctionalTestCMakeEntries Include="cmake_minimum_required(VERSION 3.16)" /> <FunctionalTestCMakeEntries Include="project(FunctionalTestAppBundles)" /> <FunctionalTestCMakeEntries Include="add_subdirectory(%(FunctionalTestAppBundles.RootDir)%(FunctionalTestAppBundles.Directory) %(FunctionalTestAppBundles.RecursiveDir) EXCLUDE_FROM_ALL)" /> 
</ItemGroup> <WriteLinesToFile File="$(NormalTestsAllAppBundlesRoot)CMakeLists.txt" Lines="@(NormalTestCMakeEntries)" Overwrite="true" WriteOnlyWhenDifferent="true" /> <WriteLinesToFile File="$(FunctionalTestsAllAppBundlesRoot)CMakeLists.txt" Lines="@(FunctionalTestCMakeEntries)" Overwrite="true" WriteOnlyWhenDifferent="true" /> <XcodeCreateProject TargetOS="$(TargetOS)" Arch="$(TargetArchitecture)" ProjectName="NormalTestAppBundles" CMakeListsDirectory="$(NormalTestsAllAppBundlesRoot)" Condition="'@(NormalTestAppBundles)' != ''" /> <XcodeCreateProject TargetOS="$(TargetOS)" Arch="$(TargetArchitecture)" ProjectName="FunctionalTestAppBundles" CMakeListsDirectory="$(FunctionalTestsAllAppBundlesRoot)" Condition="'@(FunctionalTestAppBundles)' != ''" /> <MakeDir Directories="$(TestArchiveNormalTestsDir)" /> <MakeDir Directories="$(TestArchiveFunctionalTestsDir)" /> <ItemGroup> <!-- xcodeproj are directories, not files --> <XcodeProjects Condition="'@(NormalTestAppBundles)' != ''" Include="$([System.IO.Directory]::GetDirectories('$(NormalTestsAllAppBundlesRoot)NormalTestAppBundles/%(NormalTestAppBundles.RecursiveDir)', '*.xcodeproj'))" DestinationFolder="$(TestArchiveNormalTestsDir)" /> <XcodeProjects Condition="'@(FunctionalTestAppBundles)' != ''" Include="$([System.IO.Directory]::GetDirectories('$(FunctionalTestsAllAppBundlesRoot)FunctionalTestAppBundles/%(FunctionalTestAppBundles.RecursiveDir)', '*.xcodeproj'))" DestinationFolder="$(TestArchiveFunctionalTestsDir)" /> </ItemGroup> <XcodeBuildApp TargetOS="$(TargetOS)" Arch="$(TargetArchitecture)" XcodeProjectPath="%(XcodeProjects.Identity)" DevTeamProvisioning="$(DevTeamProvisioning)" Optimized="True" DestinationFolder="%(XcodeProjects.DestinationFolder)" /> <RemoveDir Condition="'$(ArchiveTests)' == 'true'" Directories="$(AppBundleRoot)" /> </Target> <!-- Restoring all trimming test projects upfront in one single call to RestoreTrimmingProjects so as to avoid possible race conditions that could happen if we restore each individually. --> <Target Name="RestoreTrimmingProjects" BeforeTargets="Build" Condition="'$(TestTrimming)' == 'true'"> <MSBuild Projects="@(TrimmingTestProjects)" Targets="GetTrimmingProjectsToRestore"> <Output TaskParameter="TargetOutputs" ItemName="_TrimmingProjectsToRestore" /> </MSBuild> <MSBuild Projects="@(_TrimmingProjectsToRestore)" Targets="Restore" Properties="MSBuildRestoreSessionId=$([System.Guid]::NewGuid());Configuration=$(Configuration)" /> </Target> </Project>
<Project Sdk="Microsoft.Build.Traversal"> <PropertyGroup Condition="'$(BuildAllConfigurations)' != 'true'"> <!-- Build for NetCoreAppCurrent by default if no BuildTargetFramework is supplied or if not all configurations are built. --> <TargetFramework>$([MSBuild]::ValueOrDefault('$(BuildTargetFramework)', '$(NetCoreAppCurrent)'))-$(TargetOS)</TargetFramework> <!-- Filter ProjectReferences to build the best matching target framework only. --> <FilterTraversalProjectReferences>true</FilterTraversalProjectReferences> </PropertyGroup> <PropertyGroup> <TestInParallel Condition="'$(Coverage)' == 'true'">false</TestInParallel> <!-- For tests we want to continue running if a test run failed. --> <TestContinueOnError>ErrorAndContinue</TestContinueOnError> <TraversalGlobalProperties>BuildAllProjects=true</TraversalGlobalProperties> <CoverageReportInputPath>$(ArtifactsBinDir)\*.Tests\**\coverage.opencover.xml</CoverageReportInputPath> <CoverageReportDir>$(ArtifactsDir)coverage</CoverageReportDir> <EnableCoverageSupport>true</EnableCoverageSupport> <TestAssemblies Condition="'$(TestAssemblies)' == ''">true</TestAssemblies> <TestPackages Condition="'$(TestPackages)' == ''">false</TestPackages> <TestTrimming Condition="'$(TestTrimming)' == ''">false</TestTrimming> <RunHighAOTResourceRequiringTestsOnly Condition="'$(RunHighAOTResourceRequiringTestsOnly)' == ''">false</RunHighAOTResourceRequiringTestsOnly> <!-- Don't build samples, and functional tests on EAT, AOT, WBT, and Debugger lanes --> <RunWasmSamples Condition="'$(RunSmokeTestsOnly)' != 'true' and '$(RunHighAOTResourceRequiringTestsOnly)' != 'true' and '$(TargetOS)' == 'Browser' and '$(BuildAOTTestsOnHelix)' != 'true' and '$(TestWasmBuildTests)' != 'true' and '$(TestWasmDebuggerTests)' != 'true'">true</RunWasmSamples> </PropertyGroup> <ItemGroup Condition="'$(TargetOS)' == 'Browser'"> <!-- https://github.com/dotnet/runtime/issues/65356 - OOM while linking --> <HighAOTResourceRequiringProject Include="$(MSBuildThisFileDirectory)System.Text.Json\tests\System.Text.Json.SourceGeneration.Tests\System.Text.Json.SourceGeneration.Roslyn3.11.Tests.csproj" /> <!-- https://github.com/dotnet/runtime/issues/65411 - possible OOM when compiling System.Text.Json.SourceGeneration.Roslyn4.0.Tests.dll.bc -> .o --> <HighAOTResourceRequiringProject Include="$(MSBuildThisFileDirectory)System.Text.Json\tests\System.Text.Json.SourceGeneration.Tests\System.Text.Json.SourceGeneration.Roslyn4.0.Tests.csproj" /> <!-- https://github.com/dotnet/runtime/issues/61524 - OOM while linking --> <HighAOTResourceRequiringProject Include="$(MSBuildThisFileDirectory)System.Text.Json\tests\System.Text.Json.Tests\System.Text.Json.Tests.csproj" /> <!-- https://github.com/dotnet/runtime/issues/66647 --> <HighAOTResourceRequiringProject Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Logging.Abstractions\tests\Microsoft.Extensions.Logging.Generators.Tests\Microsoft.Extensions.Logging.Generators.Roslyn3.11.Tests.csproj" /> </ItemGroup> <!-- Samples which are too complex for CI --> <ItemGroup Condition="'$(TargetOS)' == 'Browser'"> <ProjectExclusions Include="$(MonoProjectRoot)sample\wasm\console-node-ts\Wasm.Console.Node.TS.Sample.csproj" /> <ProjectExclusions Include="$(MonoProjectRoot)sample\wasm\browser-webpack\Wasm.Browser.WebPack.Sample.csproj" /> <ProjectExclusions Include="$(MonoProjectRoot)sample\wasm\browser-nextjs\Wasm.Browser.NextJs.Sample.csproj" /> <!-- These tests are completely disabled on wasm --> <ProjectExclusions 
Include="$(MSBuildThisFileDirectory)System.Text.RegularExpressions/tests/System.Text.RegularExpressions.Generators.Tests/System.Text.RegularExpressions.Generators.Tests.csproj" /> </ItemGroup> <!-- Wasm aot on all platforms --> <ItemGroup Condition="'$(TargetOS)' == 'Browser' and '$(BuildAOTTestsOnHelix)' == 'true' and '$(RunDisabledWasmTests)' != 'true' and '$(RunAOTCompilation)' == 'true'"> <!-- https://github.com/dotnet/runtime/issues/66118 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Text.RegularExpressions\tests\UnitTests\System.Text.RegularExpressions.Unit.Tests.csproj" /> <!-- https://github.com/dotnet/runtime/issues/61756 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Text.RegularExpressions\tests\FunctionalTests\System.Text.RegularExpressions.Tests.csproj" /> </ItemGroup> <!-- Projects that don't support code coverage measurement. --> <ItemGroup Condition="'$(Coverage)' == 'true'"> <ProjectExclusions Include="$(CommonTestPath)Common.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.XmlSerializer.Generator\tests\Microsoft.XmlSerializer.Generator.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Transactions.Local\tests\System.Transactions.Local.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetArchitecture)' == 'ARMv6'"> <!-- https://github.com/dotnet/runtime/issues/64673 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.Ping\tests\FunctionalTests\System.Net.Ping.Functional.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetsMobile)' == 'true' or '$(TargetArchitecture)' == 'ARMv6'"> <!-- LibraryImportGenerator runtime tests depend on DNNE, which does not support mobile platforms. --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.InteropServices\tests\LibraryImportGenerator.Tests\LibraryImportGenerator.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.InteropServices\tests\LibraryImportGenerator.UnitTests\LibraryImportGenerator.Unit.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'windows' and '$(TargetArchitecture)' == 'arm'"> <!-- LibraryImportGenerator runtime tests depend on DNNE, which does not support Windows ARM32 as we don't officially support it. --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.InteropServices\tests\LibraryImportGenerator.Tests\LibraryImportGenerator.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetArchitecture)' == 'armel'"> <!-- LibraryImportGenerator runtime tests depend on DNNE, which does not support armel as we don't officially support it. --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.InteropServices\tests\LibraryImportGenerator.Tests\LibraryImportGenerator.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetArchitecture)' == 'arm'"> <!-- Issue: https://github.com/dotnet/runtime/issues/60705 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.InteropServices\tests\LibraryImportGenerator.UnitTests\LibraryImportGenerator.Unit.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'FreeBSD'"> <!-- LibraryImportGenerator runtime tests build depends pulling down a pre-built nethost binary, which is not available for FreeBSD. 
--> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.InteropServices\tests\LibraryImportGenerator.Tests\LibraryImportGenerator.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'linux' and '$(TargetArchitecture)' == 's390x'"> <!-- LibraryImportGenerator runtime tests build depends pulling down a pre-built nethost binary, which is not available for s390x. --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.InteropServices\tests\LibraryImportGenerator.Tests\LibraryImportGenerator.Tests.csproj" /> <!-- LibraryImportGenerator unit tests fail since NuGet 5.6.0 signature verification does not work on big-endian systems (needs >= 5.11.0). --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.InteropServices\tests\LibraryImportGenerator.UnitTests\LibraryImportGenerator.Unit.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'Windows' and '$(RuntimeFlavor)' == 'Mono' and '$(RunDisabledMonoTestsOnWindows)' != 'true'"> <!-- Issue: https://github.com/dotnet/runtime/issues/53281 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.WebSockets.Client\tests\System.Net.WebSockets.Client.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.InteropServices\tests\LibraryImportGenerator.UnitTests\LibraryImportGenerator.Unit.Tests.csproj" /> <!-- Issue: https://github.com/dotnet/runtime/issues/63723 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Drawing.Common\tests\System.Drawing.Common.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' != 'Windows'"> <!-- windows specific tests --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Hosting.WindowsServices/tests/Microsoft.Extensions.Hosting.WindowsServices.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'Android'"> <!-- Never going to run on Android --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.OpenSsl\tests\System.Security.Cryptography.OpenSsl.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'Android' and '$(RunDisabledAndroidTests)' != 'true'"> <!-- Tests time out intermittently --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Hosting\tests\UnitTests\Microsoft.Extensions.Hosting.Unit.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.Security\tests\FunctionalTests\System.Net.Security.Tests.csproj" /> <!-- Tests crash --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Globalization\tests\Invariant\Invariant.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.IO.FileSystem\tests\System.IO.FileSystem.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.IO.FileSystem.Watcher\tests\System.IO.FileSystem.Watcher.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.IO.Ports\tests\System.IO.Ports.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.Quic\tests\FunctionalTests\System.Net.Quic.Functional.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Reflection\tests\System.Reflection.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.Serialization.Formatters\tests\System.Runtime.Serialization.Formatters.Tests.csproj" /> <ProjectExclusions 
Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.Algorithms\tests\System.Security.Cryptography.Algorithms.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.Csp\tests\System.Security.Cryptography.Csp.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Threading.Thread\tests\System.Threading.Thread.Tests.csproj" /> <!-- Actual test failures --> <!-- https://github.com/dotnet/runtime/issues/50871 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.DependencyInjection\tests\DI.Tests\Microsoft.Extensions.DependencyInjection.Tests.csproj" /> <!-- https://github.com/dotnet/runtime/issues/50874 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Logging.EventSource\tests\Microsoft.Extensions.Logging.EventSource.Tests.csproj" /> <!-- https://github.com/dotnet/runtime/issues/50923 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Data.Common\tests\System.Data.Common.Tests.csproj" /> <!-- https://github.com/dotnet/runtime/issues/50926 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Diagnostics.Tracing\tests\System.Diagnostics.Tracing.Tests.csproj" /> <!-- https://github.com/dotnet/runtime/issues/49936 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Text.Json\tests\System.Text.Json.Tests\System.Text.Json.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.Http\tests\FunctionalTests\System.Net.Http.Functional.Tests.csproj" /> <!-- Execution may be compromised --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Caching.Memory\tests\Microsoft.Extensions.Caching.Memory.Tests.csproj" /> <!-- PSNE --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Console/tests/System.Console.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.Primitives/tests/FunctionalTests/System.Net.Primitives.Functional.Tests.csproj" /> <!-- Crashes on CI (possibly flakey) --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Private.Xml/tests/Misc/System.Xml.Misc.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Dynamic.Runtime/tests/System.Dynamic.Runtime.Tests.csproj"/> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'Android' and '$(TargetArchitecture)' == 'arm64' and '$(RunDisabledAndroidTests)' != 'true'"> <!-- Crashes on CI (possibly flakey) https://github.com/dotnet/runtime/issues/52615 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Configuration/tests/FunctionalTests/Microsoft.Extensions.Configuration.Functional.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Configuration.Binder/tests/Microsoft.Extensions.Configuration.Binder.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.FileProviders.Physical/tests/Microsoft.Extensions.FileProviders.Physical.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Win32.Primitives/tests/Microsoft.Win32.Primitives.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Collections.Concurrent/tests/System.Collections.Concurrent.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Collections.Specialized/tests/System.Collections.Specialized.Tests.csproj" /> <ProjectExclusions 
Include="$(MSBuildThisFileDirectory)System.ComponentModel.Annotations/tests/System.ComponentModel.Annotations.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Composition.Hosting/tests/System.Composition.Hosting.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.IO.FileSystem.Primitives/tests/System.IO.FileSystem.Primitives.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Memory/tests/System.Memory.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.Mail/tests/Functional/System.Net.Mail.Functional.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.NameResolution/tests/PalTests/System.Net.NameResolution.Pal.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.WebClient/tests/System.Net.WebClient.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Numerics.Tensors/tests/System.Numerics.Tensors.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Private.Xml/tests/XmlReader/Tests/System.Xml.RW.XmlReader.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Private.Xml/tests/XPath/XPathDocument/System.Xml.XPath.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Private.Uri/tests/ExtendedFunctionalTests/System.Private.Uri.ExtendedFunctional.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Reflection.DispatchProxy/tests/System.Reflection.DispatchProxy.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'Android' and '$(TargetArchitecture)' == 'x64' and '$(RunDisabledAndroidTests)' != 'true'"> <!-- Test flakiness on x64 https://github.com/dotnet/runtime/issues/49937 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Threading\tests\System.Threading.Tests.csproj" /> <!-- Out of memory https://github.com/dotnet/runtime/issues/62547 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography\tests\System.Security.Cryptography.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'Android' and '$(TargetArchitecture)' == 'x86' and '$(RunDisabledAndroidTests)' != 'true'"> <!-- Crashes only on x86 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Primitives\tests\Microsoft.Extensions.Primitives.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.Extensions\tests\System.Runtime.Extensions.Tests.csproj" /> <!-- Out of memory https://github.com/dotnet/runtime/issues/62547 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography\tests\System.Security.Cryptography.Tests.csproj" /> <!-- https://github.com/dotnet/runtime/issues/50493 --> <ProjectExclusions Include="$(RepoRoot)\src\tests\FunctionalTests\Android\Device_Emulator\AOT\Android.Device_Emulator.Aot.Test.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'iOS' and '$(RunDisablediOSTests)' != 'true'"> <ProjectExclusions Include="$(MSBuildThisFileDirectory)*\tests\**\*.Tests.csproj" /> <!-- Ref.Emit in XSLCompiledTransform --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Private.Xml\tests\Xslt\XslCompiledTransformApi\System.Xml.Xsl.XslCompiledTransformApi.Tests.csproj" /> <!-- Functional tests on devices have problems with return codes from mlaunch --> <ProjectExclusions 
Include="$(RepoRoot)\src\tests\FunctionalTests\$(TargetOS)\Device\**\*.Test.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'iOSSimulator' and '$(TargetArchitecture)' == 'arm64' and '$(RunDisablediOSTests)' != 'true'"> <!-- Functional tests on arm64 simulator have problems with return codes from mlaunch --> <ProjectExclusions Include="$(RepoRoot)\src\tests\FunctionalTests\iOS\Simulator\**\*.Test.csproj" /> </ItemGroup> <!-- Excluding all tests for devices until building on helix works properly --> <ItemGroup Condition="'$(TargetOS)' == 'tvOS' and '$(RunDisablediOSTests)' != 'true'"> <!-- Ref.Emit in XSLCompiledTransform --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Private.Xml\tests\Xslt\XslCompiledTransformApi\System.Xml.Xsl.XslCompiledTransformApi.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Reflection\tests\System.Reflection.Emit.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Reflection.Emit.ILGeneration/tests/System.Reflection.Emit.ILGeneration.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Reflection.Emit.Lightweight/tests/System.Reflection.Emit.Lightweight.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Reflection.DispatchProxy/tests/System.Reflection.DispatchProxy.Tests.csproj" /> <!-- Has deps that JIT, need re-done in order to pass --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Hosting/tests/UnitTests/Microsoft.Extensions.Hosting.Unit.Tests.csproj" /> <!-- Test suites hang and time out. https://github.com/dotnet/runtime/issues/60713 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.DependencyInjection/tests/DI.External.Tests/Microsoft.Extensions.DependencyInjection.ExternalContainers.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.NetworkInformation/tests/FunctionalTests/System.Net.NetworkInformation.Functional.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Reflection/tests/System.Reflection.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Threading.Channels/tests/System.Threading.Channels.Tests.csproj" /> <!-- Functional tests on devices have problems with return codes from mlaunch --> <ProjectExclusions Include="$(RepoRoot)\src\tests\FunctionalTests\$(TargetOS)\Device\**\*.Test.csproj" /> </ItemGroup> <!-- Excluding all tests for aot catalyst until building on helix works properly --> <ItemGroup Condition="('$(TargetOS)' == 'MacCatalyst' and '$(BuildTestsOnHelix)' == 'true') and '$(RunDisablediOSTests)' != 'true'"> <ProjectExclusions Include="$(MSBuildThisFileDirectory)*\tests\**\*.Tests.csproj" /> <!-- No functional tests until helix stabilizes --> <ProjectExclusions Include="$(RepoRoot)\src\tests\FunctionalTests\iOS\Simulator\**\*.Test.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'MacCatalyst'"> <ProjectExclusions Include="$(RepoRoot)/src/tests/FunctionalTests/iOS/Simulator/XmlFormatWriterGeneratorAOT/iOS.Simulator.XmlFormatWriterGeneratorAot.Test.csproj" /> </ItemGroup> <!-- Run only explicitly selected tests for Mac Catalyst in App Sandbox --> <ItemGroup Condition="'$(TargetOS)' == 'MacCatalyst' and '$(EnableAppSandbox)' == 'true'"> <ProjectExclusions Include="$(MSBuildThisFileDirectory)*/tests/**/*.Tests.csproj" /> <ProjectExclusions Include="$(RepoRoot)/src/tests/FunctionalTests/iOS/Simulator/**/*.Test.csproj" /> <!-- 
https://github.com/dotnet/runtime/pull/61507 --> <ProjectExclusions Remove="$(MSBuildThisFileDirectory)System.Diagnostics.Process/tests/System.Diagnostics.Process.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="('$(TargetOS)' == 'iOS' or '$(TargetOS)' == 'iOSSimulator' or '$(TargetOS)' == 'tvOS' or '$(TargetOS)' == 'tvOSSimulator' or '$(TargetOS)' == 'MacCatalyst') and '$(RunDisablediOSTests)' != 'true'"> <!-- PNSE --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.Quic/tests/FunctionalTests/System.Net.Quic.Functional.Tests.csproj" /> <!-- https://github.com/dotnet/runtime/issues/51414 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.OpenSsl/tests/System.Security.Cryptography.OpenSsl.Tests.csproj" /> <!-- App Crash https://github.com/dotnet/runtime/issues/53624 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.CSharp/tests/Microsoft.CSharp.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Diagnostics.Tracing/tests/System.Diagnostics.Tracing.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Dynamic.Runtime/tests/System.Dynamic.Runtime.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Linq.Expressions/tests/System.Linq.Expressions.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.Requests/tests/System.Net.Requests.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.Security/tests/FunctionalTests/System.Net.Security.Tests.csproj" /> <ProjectExclusions Include="$(RepoRoot)/src/tests/FunctionalTests/iOS/Simulator/PInvoke/iOS.Simulator.PInvoke.Test.csproj" /> <ProjectExclusions Include="$(RepoRoot)/src/tests/FunctionalTests/tvOS/Simulator/AOT/tvOS.Simulator.Aot.Test.csproj" /> </ItemGroup> <ItemGroup Condition="('$(TargetOS)' == 'iOS' or '$(TargetOS)' == 'iOSSimulator' or '$(TargetOS)' == 'tvOS' or '$(TargetOS)' == 'tvOSSimulator') and '$(RunDisablediOSTests)' != 'true'"> <!-- https://github.com/dotnet/runtime/issues/51335 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.IO.Pipes/tests/System.IO.Pipes.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'Browser' and '$(RunDisabledWasmTests)' != 'true' and '$(RunAOTCompilation)' != 'true'"> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'Browser' and '$(RunDisabledWasmTests)' != 'true'"> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.NETCore.Platforms\tests\Microsoft.NETCore.Platforms.Tests.csproj" /> <!-- This test is disabled via an assembly-level attribute in source. We exclude it here to avoid queuing/running a work item entirely. https://github.com/dotnet/runtime/issues/35970 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Caching.Memory\tests\Microsoft.Extensions.Caching.Memory.Tests.csproj" /> <!-- This test is disabled via an assembly-level attribute in source. We exclude it here to avoid queuing/running a work item entirely. https://github.com/mono/mono/issues/16417 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.ComponentModel.Composition\tests\System.ComponentModel.Composition.Tests.csproj" /> <!-- Mono-Browser ignores runtimeconfig.template.json (e.g. 
for this it has "System.Globalization.EnforceJapaneseEraYearRanges": true) --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Globalization.Calendars\tests\CalendarTestWithConfigSwitch\System.Globalization.CalendarsWithConfigSwitch.Tests.csproj" /> <!-- https://github.com/dotnet/runtime/issues/37669 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.DependencyModel\tests\Microsoft.Extensions.DependencyModel.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Hosting\tests\UnitTests\Microsoft.Extensions.Hosting.Unit.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.Csp\tests\System.Security.Cryptography.Csp.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.Encoding\tests\System.Security.Cryptography.Encoding.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.OpenSsl\tests\System.Security.Cryptography.OpenSsl.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.Pkcs\tests\System.Security.Cryptography.Pkcs.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.Primitives\tests\System.Security.Cryptography.Primitives.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.Xml\tests\System.Security.Cryptography.Xml.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.X509Certificates\tests\System.Security.Cryptography.X509Certificates.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.Cose\tests\System.Security.Cryptography.Cose.Tests.csproj" /> <!-- This OuterLoop test requires browser UI, but the Helix agents are headless --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.WebSockets.Client\tests\wasm\System.Net.WebSockets.Client.Wasm.Tests.csproj" /> </ItemGroup> <!-- Aggressive Trimming related failures --> <ItemGroup Condition="('$(TargetOS)' != 'Browser' and '$(RunAOTCompilation)' == 'true' and '$(MonoForceInterpreter)' != 'true') or ('$(TargetOS)' == 'Browser' and '$(BuildAOTTestsOnHelix)' == 'true' and '$(RunDisabledWasmTests)' != 'true')"> <!-- Issue: https://github.com/dotnet/runtime/issues/59926 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.Serialization.Xml\tests\System.Runtime.Serialization.Xml.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="('$(TargetOS)' != 'Browser' and '$(RunAOTCompilation)' == 'true' and '$(MonoForceInterpreter)' != 'true') or ('$(TargetOS)' == 'Browser' and '$(BuildAOTTestsOnHelix)' == 'true' and '$(RunDisabledWasmTests)' != 'true') or ('$(TargetOS)' == 'iOS' and '$(BuildTestsOnHelix)' == 'true')"> <!-- Issue: https://github.com/dotnet/runtime/issues/50724 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Composition\tests\System.Composition.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Dynamic.Runtime\tests\System.Dynamic.Runtime.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.DependencyInjection\tests\DI.Tests\Microsoft.Extensions.DependencyInjection.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Logging.EventSource\tests\Microsoft.Extensions.Logging.EventSource.Tests.csproj" /> <ProjectExclusions 
Include="$(MSBuildThisFileDirectory)System.Diagnostics.DiagnosticSource\tests\System.Diagnostics.DiagnosticSource.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Diagnostics.Tracing/tests/System.Diagnostics.Tracing.Tests.csproj" /> <!-- Issue: https://github.com/dotnet/runtime/issues/51708 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Linq.Expressions\tests\System.Linq.Expressions.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'Browser' and '$(BrowserHost)' == 'windows' and '$(Scenario)' == 'WasmTestOnBrowser' and '$(RunDisabledWasmTestsOnWindows)' != 'true'"> <!-- Issue: https://github.com/dotnet/runtime/issues/55429 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.VisualBasic.Core\tests\Microsoft.VisualBasic.Core.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.IO\tests\System.IO.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Linq.Expressions\tests\System.Linq.Expressions.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Memory\tests\System.Memory.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime\tests\System.Runtime.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Private.Xml\tests\Writers\XmlWriterApi\System.Xml.RW.XmlWriterApi.Tests.csproj" /> <ProjectExclusions Include="$(RepoRoot)\src\tests\BuildWasmApps\Wasm.Build.Tests\Wasm.Build.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'OSX' and '$(TargetArchitecture)' == 'arm64' and '$(RunDisabledAppleSiliconTests)' != 'true'"> <!-- ActiveIssue Apple Silicon No usable version of libssl was found https://github.com/dotnet/runtime/issues/49083 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.Algorithms/tests/System.Security.Cryptography.Algorithms.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.OpenSsl/tests/System.Security.Cryptography.OpenSsl.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetArchitecture)' == 's390x' and '$(RunDisableds390xTests)' != 'true'"> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Drawing.Common\tests\System.Drawing.Common.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TestSingleFile)' == 'true' and '$(TestNativeAot)' != 'true'"> <!-- Run only a small randomly chosen set of passing test suites --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)*\tests\**\*.Tests.csproj" /> <ProjectExclusions Remove="$(MSBuildThisFileDirectory)System.Collections\tests\System.Collections.Tests.csproj" /> <ProjectExclusions Remove="$(MSBuildThisFileDirectory)System.IO.IsolatedStorage\tests\System.IO.IsolatedStorage.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TestNativeAot)' == 'true'"> <!-- Run only a small randomly chosen set of passing test suites --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)*\tests\**\*.Tests.csproj" /> <ProjectExclusions Remove="$(MSBuildThisFileDirectory)System.Collections\tests\System.Collections.Tests.csproj" /> </ItemGroup> <ItemGroup> <ProjectExclusions Condition="'$(RunHighAOTResourceRequiringTestsOnly)' != 'true'" Include="@(HighAOTResourceRequiringProject)" /> </ItemGroup> <ItemGroup> <ProjectReference Condition="'$(RunSmokeTestsOnly)' == 'true'" Include="$(MSBuildThisFileDirectory)System.Runtime\tests\System.Runtime.Tests.csproj" /> <ProjectReference 
Condition="'$(RunHighAOTResourceRequiringTestsOnly)' == 'true'" Include="@(HighAOTResourceRequiringProject)" BuildInParallel="false" /> </ItemGroup> <ItemGroup Condition="'$(RunSmokeTestsOnly)' != 'true' and '$(RunHighAOTResourceRequiringTestsOnly)' != 'true'"> <ProjectReference Include="$(MSBuildThisFileDirectory)*\tests\**\*.Tests.csproj" Exclude="@(ProjectExclusions)" Condition="'$(TestAssemblies)' == 'true'" /> <ProjectReference Include="$(MSBuildThisFileDirectory)testPackages\testPackages.proj" Condition="'$(TestPackages)' == 'true'" /> <TrimmingTestProjects Include="$(MSBuildThisFileDirectory)*\tests\**\*.TrimmingTests.proj" Exclude="@(ProjectExclusions)" Condition="'$(TestTrimming)' == 'true'" AdditionalProperties="%(AdditionalProperties);SkipTrimmingProjectsRestore=true" /> <ProjectReference Include="@(TrimmingTestProjects)" /> <!-- wasm.build.tests are run on _WasmBuildTests job on CI, and with library tests locally. --> <ProjectReference Include="$(RepoRoot)\src\tests\BuildWasmApps\Wasm.Build.Tests\*.Tests.csproj" Exclude="@(ProjectExclusions)" Condition="'$(TargetOS)' == 'Browser' and (('$(ContinuousIntegrationBuild)' == 'true' and '$(TestWasmBuildTests)' == 'true') or ('$(ContinuousIntegrationBuild)' != 'true' and '$(TestAssemblies)' == 'true'))" BuildInParallel="false" /> <ProjectReference Include="$(RepoRoot)\src\tests\BuildWasmApps\Wasm.Debugger.Tests\*.Tests.csproj" Exclude="@(ProjectExclusions)" Condition="'$(TargetOS)' == 'Browser' and (('$(ContinuousIntegrationBuild)' == 'true' and '$(TestWasmDebuggerTests)' == 'true') or ('$(ContinuousIntegrationBuild)' != 'true' and '$(TestAssemblies)' == 'true'))" BuildInParallel="false" /> </ItemGroup> <ItemGroup Condition="'$(ArchiveTests)' == 'true' and '$(TargetOS)' == 'iOS'"> <!-- Only System.Runtime tests on iOS for now --> <ProjectReference Include="$(MSBuildThisFileDirectory)System.Runtime\tests\System.Runtime.Tests.csproj" /> <ProjectReference Include="$(RepoRoot)\src\tests\FunctionalTests\iOS\Device\**\*.Test.csproj" Exclude="@(ProjectExclusions)" BuildInParallel="false" /> </ItemGroup> <ItemGroup Condition="'$(ArchiveTests)' == 'true' and '$(RunSmokeTestsOnly)' != 'true' and '$(TargetOS)' == 'iOSSimulator'"> <!-- https://github.com/dotnet/runtime/issues/57666 --> <!-- <ProjectReference Include="$(MonoProjectRoot)sample\iOS\Program.csproj" BuildInParallel="false" /> --> <ProjectReference Include="$(RepoRoot)\src\tests\FunctionalTests\iOS\Simulator\**\*.Test.csproj" Exclude="@(ProjectExclusions)" BuildInParallel="false" /> </ItemGroup> <ItemGroup Condition="'$(ArchiveTests)' == 'true' and '$(RunSmokeTestsOnly)' != 'true' and '$(TargetOS)' == 'tvOS'"> <ProjectReference Include="$(RepoRoot)\src\tests\FunctionalTests\tvOS\Device\**\*.Test.csproj" Exclude="@(ProjectExclusions)" BuildInParallel="false" /> </ItemGroup> <ItemGroup Condition="'$(ArchiveTests)' == 'true' and '$(RunSmokeTestsOnly)' != 'true' and '$(TargetOS)' == 'MacCatalyst'"> <!-- https://github.com/dotnet/runtime/issues/57666 --> <!-- <ProjectReference Include="$(MonoProjectRoot)sample\iOS\Program.csproj" BuildInParallel="false" /> --> <ProjectReference Include="$(RepoRoot)\src\tests\FunctionalTests\iOS\Simulator\**\*.Test.csproj" Exclude="@(ProjectExclusions)" BuildInParallel="false" /> </ItemGroup> <ItemGroup Condition="'$(ArchiveTests)' == 'true' and '$(RunSmokeTestsOnly)' != 'true' and '$(TargetOS)' == 'tvOSSimulator'"> <ProjectReference Include="$(RepoRoot)\src\tests\FunctionalTests\tvOS\Simulator\**\*.Test.csproj" Exclude="@(ProjectExclusions)" 
BuildInParallel="false" /> </ItemGroup> <ItemGroup Condition="'$(ArchiveTests)' == 'true' and '$(RunSmokeTestsOnly)' != 'true' and '$(TargetOS)' == 'Android'"> <ProjectReference Include="$(MonoProjectRoot)sample\Android\AndroidSampleApp.csproj" BuildInParallel="false" /> <ProjectReference Include="$(RepoRoot)\src\tests\FunctionalTests\Android\**\*.Test.csproj" Exclude="@(ProjectExclusions)" BuildInParallel="false" /> </ItemGroup> <PropertyGroup> <Samples_BuildInParallel Condition="'$(OS)' == 'Windows_NT'">false</Samples_BuildInParallel> <Samples_BuildInParallel Condition="'$(OS)' != 'Windows_NT'">true</Samples_BuildInParallel> </PropertyGroup> <ItemGroup Condition="'$(ArchiveTests)' == 'true' and '$(RunWasmSamples)' == 'true'"> <ProjectReference Include="$(MonoProjectRoot)sample\wasm\**\*.Sample.csproj" Exclude="@(ProjectExclusions)" BuildInParallel="$(Samples_BuildInParallel)" /> <ProjectReference Include="$(RepoRoot)\src\tests\FunctionalTests\WebAssembly\**\*.Test.csproj" Exclude="@(ProjectExclusions)" BuildInParallel="false" /> </ItemGroup> <Target Name="GenerateMergedCoverageReport" AfterTargets="Test" DependsOnTargets="GenerateCoverageReport" Condition="'$(TestAssemblies)' == 'true' and '$(Coverage)' == 'true'" /> <!-- Build Apple app bundles using AppBundleRoot --> <UsingTask Condition="'$(UseAppBundleRootForBuildingTests)' == 'true'" TaskName="XcodeCreateProject" AssemblyFile="$(AppleAppBuilderTasksAssemblyPath)" /> <UsingTask Condition="'$(UseAppBundleRootForBuildingTests)' == 'true'" TaskName="XcodeBuildApp" AssemblyFile="$(AppleAppBuilderTasksAssemblyPath)" /> <Target Condition="'$(UseAppBundleRootForBuildingTests)' == 'true'" Name="BuildAppleAppBundles" AfterTargets="Build"> <PropertyGroup> <!-- TODO: Unify this with TestArchiveTestsRoot in src/libraries/Directory.Build.props somehow, we can't use IsFunctionalTest==true here because it is only set in the context of the .csproj --> <TestArchiveNormalTestsRoot>$(TestArchiveRoot)tests/</TestArchiveNormalTestsRoot> <TestArchiveFunctionalTestsRoot>$(TestArchiveRoot)runonly/</TestArchiveFunctionalTestsRoot> <TestArchiveNormalTestsDir>$(TestArchiveNormalTestsRoot)$(OSPlatformConfig)/</TestArchiveNormalTestsDir> <TestArchiveFunctionalTestsDir>$(TestArchiveFunctionalTestsRoot)$(OSPlatformConfig)/</TestArchiveFunctionalTestsDir> <NormalTestsAppBundleRoot>$(AppBundleRoot)/tests/</NormalTestsAppBundleRoot> <FunctionalTestsAppBundleRoot>$(AppBundleRoot)/runonly/</FunctionalTestsAppBundleRoot> <NormalTestsAllAppBundlesRoot>$(AppBundleRoot)/tests.all/</NormalTestsAllAppBundlesRoot> <FunctionalTestsAllAppBundlesRoot>$(AppBundleRoot)/runonly.all/</FunctionalTestsAllAppBundlesRoot> </PropertyGroup> <ItemGroup> <NormalTestAppBundles Include="$(NormalTestsAppBundleRoot)*/AppBundle/CMakeLists.txt" /> <NormalTestCMakeEntries Include="cmake_minimum_required(VERSION 3.16)" /> <NormalTestCMakeEntries Include="project(NormalTestAppBundles)" /> <NormalTestCMakeEntries Include="add_subdirectory(%(NormalTestAppBundles.RootDir)%(NormalTestAppBundles.Directory) %(NormalTestAppBundles.RecursiveDir) EXCLUDE_FROM_ALL)" /> <FunctionalTestAppBundles Include="$(FunctionalTestsAppBundleRoot)*/AppBundle/CMakeLists.txt" /> <FunctionalTestCMakeEntries Include="cmake_minimum_required(VERSION 3.16)" /> <FunctionalTestCMakeEntries Include="project(FunctionalTestAppBundles)" /> <FunctionalTestCMakeEntries Include="add_subdirectory(%(FunctionalTestAppBundles.RootDir)%(FunctionalTestAppBundles.Directory) %(FunctionalTestAppBundles.RecursiveDir) EXCLUDE_FROM_ALL)" /> 
</ItemGroup> <WriteLinesToFile File="$(NormalTestsAllAppBundlesRoot)CMakeLists.txt" Lines="@(NormalTestCMakeEntries)" Overwrite="true" WriteOnlyWhenDifferent="true" /> <WriteLinesToFile File="$(FunctionalTestsAllAppBundlesRoot)CMakeLists.txt" Lines="@(FunctionalTestCMakeEntries)" Overwrite="true" WriteOnlyWhenDifferent="true" /> <XcodeCreateProject TargetOS="$(TargetOS)" Arch="$(TargetArchitecture)" ProjectName="NormalTestAppBundles" CMakeListsDirectory="$(NormalTestsAllAppBundlesRoot)" Condition="'@(NormalTestAppBundles)' != ''" /> <XcodeCreateProject TargetOS="$(TargetOS)" Arch="$(TargetArchitecture)" ProjectName="FunctionalTestAppBundles" CMakeListsDirectory="$(FunctionalTestsAllAppBundlesRoot)" Condition="'@(FunctionalTestAppBundles)' != ''" /> <MakeDir Directories="$(TestArchiveNormalTestsDir)" /> <MakeDir Directories="$(TestArchiveFunctionalTestsDir)" /> <ItemGroup> <!-- xcodeproj are directories, not files --> <XcodeProjects Condition="'@(NormalTestAppBundles)' != ''" Include="$([System.IO.Directory]::GetDirectories('$(NormalTestsAllAppBundlesRoot)NormalTestAppBundles/%(NormalTestAppBundles.RecursiveDir)', '*.xcodeproj'))" DestinationFolder="$(TestArchiveNormalTestsDir)" /> <XcodeProjects Condition="'@(FunctionalTestAppBundles)' != ''" Include="$([System.IO.Directory]::GetDirectories('$(FunctionalTestsAllAppBundlesRoot)FunctionalTestAppBundles/%(FunctionalTestAppBundles.RecursiveDir)', '*.xcodeproj'))" DestinationFolder="$(TestArchiveFunctionalTestsDir)" /> </ItemGroup> <XcodeBuildApp TargetOS="$(TargetOS)" Arch="$(TargetArchitecture)" XcodeProjectPath="%(XcodeProjects.Identity)" DevTeamProvisioning="$(DevTeamProvisioning)" Optimized="True" DestinationFolder="%(XcodeProjects.DestinationFolder)" /> <RemoveDir Condition="'$(ArchiveTests)' == 'true'" Directories="$(AppBundleRoot)" /> </Target> <!-- Restoring all trimming test projects upfront in one single call to RestoreTrimmingProjects so as to avoid possible race conditions that could happen if we restore each individually. --> <Target Name="RestoreTrimmingProjects" BeforeTargets="Build" Condition="'$(TestTrimming)' == 'true'"> <MSBuild Projects="@(TrimmingTestProjects)" Targets="GetTrimmingProjectsToRestore"> <Output TaskParameter="TargetOutputs" ItemName="_TrimmingProjectsToRestore" /> </MSBuild> <MSBuild Projects="@(_TrimmingProjectsToRestore)" Targets="Restore" Properties="MSBuildRestoreSessionId=$([System.Guid]::NewGuid());Configuration=$(Configuration)" /> </Target> </Project>
-1
dotnet/runtime
66,479
Add metrics for caching
Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
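Since every record in this dump carries the same caching-statistics description, here is a single, minimal usage sketch of the API surface it quotes (`MemoryCacheOptions.TrackStatistics`, `MemoryCache.GetCurrentStatistics`, `MemoryCacheStatistics`). It is an illustration only: the demo class name is invented here, the assumption that statistics are only collected when `TrackStatistics` is enabled is inferred from the option's purpose, and whether `CurrentEstimatedSize` is populated depends on how entry sizes are configured.

```c#
using System;
using Microsoft.Extensions.Caching.Memory;

class CacheStatisticsDemo
{
    static void Main()
    {
        // Assumption: statistics are only gathered when TrackStatistics is opted into.
        using var cache = new MemoryCache(new MemoryCacheOptions { TrackStatistics = true });

        cache.Set("a", 1);
        _ = cache.TryGetValue("a", out int _); // expected hit
        _ = cache.TryGetValue("b", out int _); // expected miss

        MemoryCacheStatistics? stats = cache.GetCurrentStatistics();
        if (stats is not null)
        {
            Console.WriteLine($"Hits={stats.TotalHits} Misses={stats.TotalMisses}");
            Console.WriteLine($"Entries={stats.CurrentEntryCount} EstimatedSize={stats.CurrentEstimatedSize}");
        }
    }
}
```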
maryamariyan
2022-03-10T21:30:00Z
2022-04-08T18:16:17Z
51afa067be9f0d00dac2c455ceb78e38eef1e239
cef4ae17d91c278880f45f017e4272afe1f379b3
Add metrics for caching. Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
./src/libraries/System.ComponentModel.TypeConverter/src/System/ComponentModel/StringConverter.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Globalization; namespace System.ComponentModel { /// <summary> /// Provides a type converter to convert string objects to and from various other /// representations. /// </summary> public class StringConverter : TypeConverter { /// <summary> /// Gets a value indicating whether this converter can convert an object in the /// given source type to a string using the specified context. /// </summary> public override bool CanConvertFrom(ITypeDescriptorContext? context, Type sourceType) { return sourceType == typeof(string) || base.CanConvertFrom(context, sourceType); } /// <summary> /// Converts the specified value object to a string object. /// </summary> public override object? ConvertFrom(ITypeDescriptorContext? context, CultureInfo? culture, object? value) { if (value is string) { return (string)value; } if (value == null) { return string.Empty; } return base.ConvertFrom(context, culture, value); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Globalization; namespace System.ComponentModel { /// <summary> /// Provides a type converter to convert string objects to and from various other /// representations. /// </summary> public class StringConverter : TypeConverter { /// <summary> /// Gets a value indicating whether this converter can convert an object in the /// given source type to a string using the specified context. /// </summary> public override bool CanConvertFrom(ITypeDescriptorContext? context, Type sourceType) { return sourceType == typeof(string) || base.CanConvertFrom(context, sourceType); } /// <summary> /// Converts the specified value object to a string object. /// </summary> public override object? ConvertFrom(ITypeDescriptorContext? context, CultureInfo? culture, object? value) { if (value is string) { return (string)value; } if (value == null) { return string.Empty; } return base.ConvertFrom(context, culture, value); } } }
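The record above ships `StringConverter` unchanged; a brief usage sketch of the behavior its `ConvertFrom` override documents, assuming the standard `TypeDescriptor` lookup (the demo class name is illustrative):

```c#
using System;
using System.ComponentModel;

class StringConverterDemo
{
    static void Main()
    {
        // TypeDescriptor.GetConverter(typeof(string)) normally resolves to StringConverter.
        TypeConverter converter = TypeDescriptor.GetConverter(typeof(string));

        // Strings pass through unchanged.
        Console.WriteLine(converter.ConvertFrom("hello")); // hello

        // Per the override shown above, null maps to string.Empty
        // (null-forgiving operator used to satisfy the base signature's annotation).
        object? fromNull = converter.ConvertFrom(null!);
        Console.WriteLine(fromNull is string s && s.Length == 0); // True
    }
}
```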
-1
dotnet/runtime
66,479
Add metrics for caching
Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
maryamariyan
2022-03-10T21:30:00Z
2022-04-08T18:16:17Z
51afa067be9f0d00dac2c455ceb78e38eef1e239
cef4ae17d91c278880f45f017e4272afe1f379b3
Add metrics for caching. Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
./src/libraries/System.Threading.Thread/ref/System.Threading.Thread.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // ------------------------------------------------------------------------------ // Changes to this file must follow the https://aka.ms/api-review process. // ------------------------------------------------------------------------------ namespace System { public sealed partial class LocalDataStoreSlot { internal LocalDataStoreSlot() { } ~LocalDataStoreSlot() { } } } namespace System.Threading { public enum ApartmentState { STA = 0, MTA = 1, Unknown = 2, } public sealed partial class CompressedStack : System.Runtime.Serialization.ISerializable { internal CompressedStack() { } public static System.Threading.CompressedStack Capture() { throw null; } public System.Threading.CompressedStack CreateCopy() { throw null; } public static System.Threading.CompressedStack GetCompressedStack() { throw null; } public void GetObjectData(System.Runtime.Serialization.SerializationInfo info, System.Runtime.Serialization.StreamingContext context) { } public static void Run(System.Threading.CompressedStack compressedStack, System.Threading.ContextCallback callback, object? state) { } } public delegate void ParameterizedThreadStart(object? obj); public sealed partial class Thread : System.Runtime.ConstrainedExecution.CriticalFinalizerObject { public Thread(System.Threading.ParameterizedThreadStart start) { } public Thread(System.Threading.ParameterizedThreadStart start, int maxStackSize) { } public Thread(System.Threading.ThreadStart start) { } public Thread(System.Threading.ThreadStart start, int maxStackSize) { } [System.ObsoleteAttribute("The ApartmentState property has been deprecated. Use GetApartmentState, SetApartmentState or TrySetApartmentState instead.")] public System.Threading.ApartmentState ApartmentState { get { throw null; } set { } } public System.Globalization.CultureInfo CurrentCulture { get { throw null; } set { } } public static System.Security.Principal.IPrincipal? CurrentPrincipal { get { throw null; } set { } } public static System.Threading.Thread CurrentThread { get { throw null; } } public System.Globalization.CultureInfo CurrentUICulture { get { throw null; } set { } } public System.Threading.ExecutionContext? ExecutionContext { get { throw null; } } public bool IsAlive { get { throw null; } } public bool IsBackground { get { throw null; } set { } } public bool IsThreadPoolThread { get { throw null; } } public int ManagedThreadId { get { throw null; } } public string? Name { get { throw null; } set { } } public System.Threading.ThreadPriority Priority { get { throw null; } set { } } public System.Threading.ThreadState ThreadState { get { throw null; } } [System.ObsoleteAttribute("Thread.Abort is not supported and throws PlatformNotSupportedException.", DiagnosticId = "SYSLIB0006", UrlFormat = "https://aka.ms/dotnet-warnings/{0}")] public void Abort() { } [System.ObsoleteAttribute("Thread.Abort is not supported and throws PlatformNotSupportedException.", DiagnosticId = "SYSLIB0006", UrlFormat = "https://aka.ms/dotnet-warnings/{0}")] public void Abort(object? 
stateInfo) { } public static System.LocalDataStoreSlot AllocateDataSlot() { throw null; } public static System.LocalDataStoreSlot AllocateNamedDataSlot(string name) { throw null; } public static void BeginCriticalRegion() { } public static void BeginThreadAffinity() { } public void DisableComObjectEagerCleanup() { } public static void EndCriticalRegion() { } public static void EndThreadAffinity() { } ~Thread() { } public static void FreeNamedDataSlot(string name) { } public System.Threading.ApartmentState GetApartmentState() { throw null; } [System.ObsoleteAttribute("Code Access Security is not supported or honored by the runtime.", DiagnosticId = "SYSLIB0003", UrlFormat = "https://aka.ms/dotnet-warnings/{0}")] public System.Threading.CompressedStack GetCompressedStack() { throw null; } public static int GetCurrentProcessorId() { throw null; } public static object? GetData(System.LocalDataStoreSlot slot) { throw null; } public static System.AppDomain GetDomain() { throw null; } public static int GetDomainID() { throw null; } public override int GetHashCode() { throw null; } public static System.LocalDataStoreSlot GetNamedDataSlot(string name) { throw null; } public void Interrupt() { } public void Join() { } public bool Join(int millisecondsTimeout) { throw null; } public bool Join(System.TimeSpan timeout) { throw null; } public static void MemoryBarrier() { } [System.ObsoleteAttribute("Thread.ResetAbort is not supported and throws PlatformNotSupportedException.", DiagnosticId = "SYSLIB0006", UrlFormat = "https://aka.ms/dotnet-warnings/{0}")] public static void ResetAbort() { } [System.ObsoleteAttribute("Thread.Resume has been deprecated. Use other classes in System.Threading, such as Monitor, Mutex, Event, and Semaphore, to synchronize Threads or protect resources.")] public void Resume() { } [System.Runtime.Versioning.SupportedOSPlatformAttribute("windows")] public void SetApartmentState(System.Threading.ApartmentState state) { } [System.ObsoleteAttribute("Code Access Security is not supported or honored by the runtime.", DiagnosticId = "SYSLIB0003", UrlFormat = "https://aka.ms/dotnet-warnings/{0}")] public void SetCompressedStack(System.Threading.CompressedStack stack) { } public static void SetData(System.LocalDataStoreSlot slot, object? data) { } public static void Sleep(int millisecondsTimeout) { } public static void Sleep(System.TimeSpan timeout) { } public static void SpinWait(int iterations) { } [System.Runtime.Versioning.UnsupportedOSPlatformAttribute("browser")] public void Start() { } [System.Runtime.Versioning.UnsupportedOSPlatformAttribute("browser")] public void Start(object? parameter) { } [System.ObsoleteAttribute("Thread.Suspend has been deprecated. Use other classes in System.Threading, such as Monitor, Mutex, Event, and Semaphore, to synchronize Threads or protect resources.")] public void Suspend() { } public bool TrySetApartmentState(System.Threading.ApartmentState state) { throw null; } [System.Runtime.Versioning.UnsupportedOSPlatformAttribute("browser")] public void UnsafeStart() { } [System.Runtime.Versioning.UnsupportedOSPlatformAttribute("browser")] public void UnsafeStart(object? 
parameter) { } public static byte VolatileRead(ref byte address) { throw null; } public static double VolatileRead(ref double address) { throw null; } public static short VolatileRead(ref short address) { throw null; } public static int VolatileRead(ref int address) { throw null; } public static long VolatileRead(ref long address) { throw null; } public static System.IntPtr VolatileRead(ref System.IntPtr address) { throw null; } [return: System.Diagnostics.CodeAnalysis.NotNullIfNotNullAttribute("address")] public static object? VolatileRead([System.Diagnostics.CodeAnalysis.NotNullIfNotNullAttribute("address")] ref object? address) { throw null; } [System.CLSCompliantAttribute(false)] public static sbyte VolatileRead(ref sbyte address) { throw null; } public static float VolatileRead(ref float address) { throw null; } [System.CLSCompliantAttribute(false)] public static ushort VolatileRead(ref ushort address) { throw null; } [System.CLSCompliantAttribute(false)] public static uint VolatileRead(ref uint address) { throw null; } [System.CLSCompliantAttribute(false)] public static ulong VolatileRead(ref ulong address) { throw null; } [System.CLSCompliantAttribute(false)] public static System.UIntPtr VolatileRead(ref System.UIntPtr address) { throw null; } public static void VolatileWrite(ref byte address, byte value) { } public static void VolatileWrite(ref double address, double value) { } public static void VolatileWrite(ref short address, short value) { } public static void VolatileWrite(ref int address, int value) { } public static void VolatileWrite(ref long address, long value) { } public static void VolatileWrite(ref System.IntPtr address, System.IntPtr value) { } public static void VolatileWrite([System.Diagnostics.CodeAnalysis.NotNullIfNotNullAttribute("value")] ref object? address, object? value) { } [System.CLSCompliantAttribute(false)] public static void VolatileWrite(ref sbyte address, sbyte value) { } public static void VolatileWrite(ref float address, float value) { } [System.CLSCompliantAttribute(false)] public static void VolatileWrite(ref ushort address, ushort value) { } [System.CLSCompliantAttribute(false)] public static void VolatileWrite(ref uint address, uint value) { } [System.CLSCompliantAttribute(false)] public static void VolatileWrite(ref ulong address, ulong value) { } [System.CLSCompliantAttribute(false)] public static void VolatileWrite(ref System.UIntPtr address, System.UIntPtr value) { } public static bool Yield() { throw null; } } public sealed partial class ThreadAbortException : System.SystemException { internal ThreadAbortException() { } public object? ExceptionState { get { throw null; } } } public partial class ThreadExceptionEventArgs : System.EventArgs { public ThreadExceptionEventArgs(System.Exception t) { } public System.Exception Exception { get { throw null; } } } public delegate void ThreadExceptionEventHandler(object sender, System.Threading.ThreadExceptionEventArgs e); public partial class ThreadInterruptedException : System.SystemException { public ThreadInterruptedException() { } protected ThreadInterruptedException(System.Runtime.Serialization.SerializationInfo info, System.Runtime.Serialization.StreamingContext context) { } public ThreadInterruptedException(string? message) { } public ThreadInterruptedException(string? message, System.Exception? 
innerException) { } } public enum ThreadPriority { Lowest = 0, BelowNormal = 1, Normal = 2, AboveNormal = 3, Highest = 4, } public delegate void ThreadStart(); public sealed partial class ThreadStartException : System.SystemException { internal ThreadStartException() { } } [System.FlagsAttribute] public enum ThreadState { Running = 0, StopRequested = 1, SuspendRequested = 2, Background = 4, Unstarted = 8, Stopped = 16, WaitSleepJoin = 32, Suspended = 64, AbortRequested = 128, Aborted = 256, } public partial class ThreadStateException : System.SystemException { public ThreadStateException() { } protected ThreadStateException(System.Runtime.Serialization.SerializationInfo info, System.Runtime.Serialization.StreamingContext context) { } public ThreadStateException(string? message) { } public ThreadStateException(string? message, System.Exception? innerException) { } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // ------------------------------------------------------------------------------ // Changes to this file must follow the https://aka.ms/api-review process. // ------------------------------------------------------------------------------ namespace System { public sealed partial class LocalDataStoreSlot { internal LocalDataStoreSlot() { } ~LocalDataStoreSlot() { } } } namespace System.Threading { public enum ApartmentState { STA = 0, MTA = 1, Unknown = 2, } public sealed partial class CompressedStack : System.Runtime.Serialization.ISerializable { internal CompressedStack() { } public static System.Threading.CompressedStack Capture() { throw null; } public System.Threading.CompressedStack CreateCopy() { throw null; } public static System.Threading.CompressedStack GetCompressedStack() { throw null; } public void GetObjectData(System.Runtime.Serialization.SerializationInfo info, System.Runtime.Serialization.StreamingContext context) { } public static void Run(System.Threading.CompressedStack compressedStack, System.Threading.ContextCallback callback, object? state) { } } public delegate void ParameterizedThreadStart(object? obj); public sealed partial class Thread : System.Runtime.ConstrainedExecution.CriticalFinalizerObject { public Thread(System.Threading.ParameterizedThreadStart start) { } public Thread(System.Threading.ParameterizedThreadStart start, int maxStackSize) { } public Thread(System.Threading.ThreadStart start) { } public Thread(System.Threading.ThreadStart start, int maxStackSize) { } [System.ObsoleteAttribute("The ApartmentState property has been deprecated. Use GetApartmentState, SetApartmentState or TrySetApartmentState instead.")] public System.Threading.ApartmentState ApartmentState { get { throw null; } set { } } public System.Globalization.CultureInfo CurrentCulture { get { throw null; } set { } } public static System.Security.Principal.IPrincipal? CurrentPrincipal { get { throw null; } set { } } public static System.Threading.Thread CurrentThread { get { throw null; } } public System.Globalization.CultureInfo CurrentUICulture { get { throw null; } set { } } public System.Threading.ExecutionContext? ExecutionContext { get { throw null; } } public bool IsAlive { get { throw null; } } public bool IsBackground { get { throw null; } set { } } public bool IsThreadPoolThread { get { throw null; } } public int ManagedThreadId { get { throw null; } } public string? Name { get { throw null; } set { } } public System.Threading.ThreadPriority Priority { get { throw null; } set { } } public System.Threading.ThreadState ThreadState { get { throw null; } } [System.ObsoleteAttribute("Thread.Abort is not supported and throws PlatformNotSupportedException.", DiagnosticId = "SYSLIB0006", UrlFormat = "https://aka.ms/dotnet-warnings/{0}")] public void Abort() { } [System.ObsoleteAttribute("Thread.Abort is not supported and throws PlatformNotSupportedException.", DiagnosticId = "SYSLIB0006", UrlFormat = "https://aka.ms/dotnet-warnings/{0}")] public void Abort(object? 
stateInfo) { } public static System.LocalDataStoreSlot AllocateDataSlot() { throw null; } public static System.LocalDataStoreSlot AllocateNamedDataSlot(string name) { throw null; } public static void BeginCriticalRegion() { } public static void BeginThreadAffinity() { } public void DisableComObjectEagerCleanup() { } public static void EndCriticalRegion() { } public static void EndThreadAffinity() { } ~Thread() { } public static void FreeNamedDataSlot(string name) { } public System.Threading.ApartmentState GetApartmentState() { throw null; } [System.ObsoleteAttribute("Code Access Security is not supported or honored by the runtime.", DiagnosticId = "SYSLIB0003", UrlFormat = "https://aka.ms/dotnet-warnings/{0}")] public System.Threading.CompressedStack GetCompressedStack() { throw null; } public static int GetCurrentProcessorId() { throw null; } public static object? GetData(System.LocalDataStoreSlot slot) { throw null; } public static System.AppDomain GetDomain() { throw null; } public static int GetDomainID() { throw null; } public override int GetHashCode() { throw null; } public static System.LocalDataStoreSlot GetNamedDataSlot(string name) { throw null; } public void Interrupt() { } public void Join() { } public bool Join(int millisecondsTimeout) { throw null; } public bool Join(System.TimeSpan timeout) { throw null; } public static void MemoryBarrier() { } [System.ObsoleteAttribute("Thread.ResetAbort is not supported and throws PlatformNotSupportedException.", DiagnosticId = "SYSLIB0006", UrlFormat = "https://aka.ms/dotnet-warnings/{0}")] public static void ResetAbort() { } [System.ObsoleteAttribute("Thread.Resume has been deprecated. Use other classes in System.Threading, such as Monitor, Mutex, Event, and Semaphore, to synchronize Threads or protect resources.")] public void Resume() { } [System.Runtime.Versioning.SupportedOSPlatformAttribute("windows")] public void SetApartmentState(System.Threading.ApartmentState state) { } [System.ObsoleteAttribute("Code Access Security is not supported or honored by the runtime.", DiagnosticId = "SYSLIB0003", UrlFormat = "https://aka.ms/dotnet-warnings/{0}")] public void SetCompressedStack(System.Threading.CompressedStack stack) { } public static void SetData(System.LocalDataStoreSlot slot, object? data) { } public static void Sleep(int millisecondsTimeout) { } public static void Sleep(System.TimeSpan timeout) { } public static void SpinWait(int iterations) { } [System.Runtime.Versioning.UnsupportedOSPlatformAttribute("browser")] public void Start() { } [System.Runtime.Versioning.UnsupportedOSPlatformAttribute("browser")] public void Start(object? parameter) { } [System.ObsoleteAttribute("Thread.Suspend has been deprecated. Use other classes in System.Threading, such as Monitor, Mutex, Event, and Semaphore, to synchronize Threads or protect resources.")] public void Suspend() { } public bool TrySetApartmentState(System.Threading.ApartmentState state) { throw null; } [System.Runtime.Versioning.UnsupportedOSPlatformAttribute("browser")] public void UnsafeStart() { } [System.Runtime.Versioning.UnsupportedOSPlatformAttribute("browser")] public void UnsafeStart(object? 
parameter) { } public static byte VolatileRead(ref byte address) { throw null; } public static double VolatileRead(ref double address) { throw null; } public static short VolatileRead(ref short address) { throw null; } public static int VolatileRead(ref int address) { throw null; } public static long VolatileRead(ref long address) { throw null; } public static System.IntPtr VolatileRead(ref System.IntPtr address) { throw null; } [return: System.Diagnostics.CodeAnalysis.NotNullIfNotNullAttribute("address")] public static object? VolatileRead([System.Diagnostics.CodeAnalysis.NotNullIfNotNullAttribute("address")] ref object? address) { throw null; } [System.CLSCompliantAttribute(false)] public static sbyte VolatileRead(ref sbyte address) { throw null; } public static float VolatileRead(ref float address) { throw null; } [System.CLSCompliantAttribute(false)] public static ushort VolatileRead(ref ushort address) { throw null; } [System.CLSCompliantAttribute(false)] public static uint VolatileRead(ref uint address) { throw null; } [System.CLSCompliantAttribute(false)] public static ulong VolatileRead(ref ulong address) { throw null; } [System.CLSCompliantAttribute(false)] public static System.UIntPtr VolatileRead(ref System.UIntPtr address) { throw null; } public static void VolatileWrite(ref byte address, byte value) { } public static void VolatileWrite(ref double address, double value) { } public static void VolatileWrite(ref short address, short value) { } public static void VolatileWrite(ref int address, int value) { } public static void VolatileWrite(ref long address, long value) { } public static void VolatileWrite(ref System.IntPtr address, System.IntPtr value) { } public static void VolatileWrite([System.Diagnostics.CodeAnalysis.NotNullIfNotNullAttribute("value")] ref object? address, object? value) { } [System.CLSCompliantAttribute(false)] public static void VolatileWrite(ref sbyte address, sbyte value) { } public static void VolatileWrite(ref float address, float value) { } [System.CLSCompliantAttribute(false)] public static void VolatileWrite(ref ushort address, ushort value) { } [System.CLSCompliantAttribute(false)] public static void VolatileWrite(ref uint address, uint value) { } [System.CLSCompliantAttribute(false)] public static void VolatileWrite(ref ulong address, ulong value) { } [System.CLSCompliantAttribute(false)] public static void VolatileWrite(ref System.UIntPtr address, System.UIntPtr value) { } public static bool Yield() { throw null; } } public sealed partial class ThreadAbortException : System.SystemException { internal ThreadAbortException() { } public object? ExceptionState { get { throw null; } } } public partial class ThreadExceptionEventArgs : System.EventArgs { public ThreadExceptionEventArgs(System.Exception t) { } public System.Exception Exception { get { throw null; } } } public delegate void ThreadExceptionEventHandler(object sender, System.Threading.ThreadExceptionEventArgs e); public partial class ThreadInterruptedException : System.SystemException { public ThreadInterruptedException() { } protected ThreadInterruptedException(System.Runtime.Serialization.SerializationInfo info, System.Runtime.Serialization.StreamingContext context) { } public ThreadInterruptedException(string? message) { } public ThreadInterruptedException(string? message, System.Exception? 
innerException) { } } public enum ThreadPriority { Lowest = 0, BelowNormal = 1, Normal = 2, AboveNormal = 3, Highest = 4, } public delegate void ThreadStart(); public sealed partial class ThreadStartException : System.SystemException { internal ThreadStartException() { } } [System.FlagsAttribute] public enum ThreadState { Running = 0, StopRequested = 1, SuspendRequested = 2, Background = 4, Unstarted = 8, Stopped = 16, WaitSleepJoin = 32, Suspended = 64, AbortRequested = 128, Aborted = 256, } public partial class ThreadStateException : System.SystemException { public ThreadStateException() { } protected ThreadStateException(System.Runtime.Serialization.SerializationInfo info, System.Runtime.Serialization.StreamingContext context) { } public ThreadStateException(string? message) { } public ThreadStateException(string? message, System.Exception? innerException) { } } }
-1
dotnet/runtime
66,479
Add metrics for caching
Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
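The "Alternative Approaches" portion of the description above mentions aggregating per-thread count/size counters instead of a single shared total. The sketch below is purely hypothetical (names such as `ThreadStats` and `GetEstimate` are invented for illustration, not taken from the PR) and only shows the shape of that idea, including why stale-thread bookkeeping would become necessary:

```c#
using System.Collections.Generic;
using System.Threading;

// Hypothetical sketch of the per-thread-counters alternative described above.
// Each thread updates only its own instance (no contention on the hot path);
// a snapshot aggregates all instances, giving eventually consistent totals.
internal sealed class ThreadStats
{
    [ThreadStatic]
    private static ThreadStats? t_current;

    // All live per-thread instances. As the description notes, threads that have
    // exited would need extra cleanup logic, which this sketch deliberately omits.
    private static readonly List<ThreadStats> s_all = new();

    public long Count;
    public long Size;

    public static ThreadStats Current
    {
        get
        {
            if (t_current is null)
            {
                t_current = new ThreadStats();
                lock (s_all) { s_all.Add(t_current); }
            }
            return t_current;
        }
    }

    // Owning threads update without synchronization, e.g.: ThreadStats.Current.Count++;
    // The aggregator uses Volatile.Read so it sees reasonably fresh values.
    public static (long count, long size) GetEstimate()
    {
        long count = 0, size = 0;
        lock (s_all)
        {
            foreach (ThreadStats s in s_all)
            {
                count += Volatile.Read(ref s.Count);
                size += Volatile.Read(ref s.Size);
            }
        }
        return (count, size);
    }
}
```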
maryamariyan
2022-03-10T21:30:00Z
2022-04-08T18:16:17Z
51afa067be9f0d00dac2c455ceb78e38eef1e239
cef4ae17d91c278880f45f017e4272afe1f379b3
Add metrics for caching. Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
./src/tests/JIT/jit64/valuetypes/nullable/box-unbox/generics/box-unbox-generics017.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> </PropertyGroup> <ItemGroup> <Compile Include="box-unbox-generics017.cs" /> <Compile Include="..\structdef.cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> </PropertyGroup> <ItemGroup> <Compile Include="box-unbox-generics017.cs" /> <Compile Include="..\structdef.cs" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,479
Add metrics for caching
Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
maryamariyan
2022-03-10T21:30:00Z
2022-04-08T18:16:17Z
51afa067be9f0d00dac2c455ceb78e38eef1e239
cef4ae17d91c278880f45f017e4272afe1f379b3
Add metrics for caching. Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd.Arm64/NegateSaturateScalar.Vector64.SByte.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void NegateSaturateScalar_Vector64_SByte() { var test = new SimpleUnaryOpTest__NegateSaturateScalar_Vector64_SByte(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleUnaryOpTest__NegateSaturateScalar_Vector64_SByte { private struct DataTable { private byte[] inArray1; private byte[] outArray; private GCHandle inHandle1; private GCHandle outHandle; private ulong alignment; public DataTable(SByte[] inArray1, SByte[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<SByte>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<SByte>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; 
this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<SByte, byte>(ref inArray1[0]), (uint)sizeOfinArray1); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector64<SByte> _fld1; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = SByte.MinValue; } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<SByte>, byte>(ref testStruct._fld1), ref Unsafe.As<SByte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<SByte>>()); return testStruct; } public void RunStructFldScenario(SimpleUnaryOpTest__NegateSaturateScalar_Vector64_SByte testClass) { var result = AdvSimd.Arm64.NegateSaturateScalar(_fld1); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(SimpleUnaryOpTest__NegateSaturateScalar_Vector64_SByte testClass) { fixed (Vector64<SByte>* pFld1 = &_fld1) { var result = AdvSimd.Arm64.NegateSaturateScalar( AdvSimd.LoadVector64((SByte*)(pFld1)) ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 8; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<SByte>>() / sizeof(SByte); private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<SByte>>() / sizeof(SByte); private static SByte[] _data1 = new SByte[Op1ElementCount]; private static Vector64<SByte> _clsVar1; private Vector64<SByte> _fld1; private DataTable _dataTable; static SimpleUnaryOpTest__NegateSaturateScalar_Vector64_SByte() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = SByte.MinValue; } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<SByte>, byte>(ref _clsVar1), ref Unsafe.As<SByte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<SByte>>()); } public SimpleUnaryOpTest__NegateSaturateScalar_Vector64_SByte() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = SByte.MinValue; } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<SByte>, byte>(ref _fld1), ref Unsafe.As<SByte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<SByte>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = SByte.MinValue; } _dataTable = new DataTable(_data1, new SByte[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.Arm64.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.Arm64.NegateSaturateScalar( Unsafe.Read<Vector64<SByte>>(_dataTable.inArray1Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = 
AdvSimd.Arm64.NegateSaturateScalar( AdvSimd.LoadVector64((SByte*)(_dataTable.inArray1Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd.Arm64).GetMethod(nameof(AdvSimd.Arm64.NegateSaturateScalar), new Type[] { typeof(Vector64<SByte>) }) .Invoke(null, new object[] { Unsafe.Read<Vector64<SByte>>(_dataTable.inArray1Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<SByte>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd.Arm64).GetMethod(nameof(AdvSimd.Arm64.NegateSaturateScalar), new Type[] { typeof(Vector64<SByte>) }) .Invoke(null, new object[] { AdvSimd.LoadVector64((SByte*)(_dataTable.inArray1Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<SByte>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.Arm64.NegateSaturateScalar( _clsVar1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector64<SByte>* pClsVar1 = &_clsVar1) { var result = AdvSimd.Arm64.NegateSaturateScalar( AdvSimd.LoadVector64((SByte*)(pClsVar1)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector64<SByte>>(_dataTable.inArray1Ptr); var result = AdvSimd.Arm64.NegateSaturateScalar(op1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector64((SByte*)(_dataTable.inArray1Ptr)); var result = AdvSimd.Arm64.NegateSaturateScalar(op1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleUnaryOpTest__NegateSaturateScalar_Vector64_SByte(); var result = AdvSimd.Arm64.NegateSaturateScalar(test._fld1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new SimpleUnaryOpTest__NegateSaturateScalar_Vector64_SByte(); fixed (Vector64<SByte>* pFld1 = &test._fld1) { var result = AdvSimd.Arm64.NegateSaturateScalar( AdvSimd.LoadVector64((SByte*)(pFld1)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.Arm64.NegateSaturateScalar(_fld1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector64<SByte>* pFld1 = &_fld1) { var result = AdvSimd.Arm64.NegateSaturateScalar( AdvSimd.LoadVector64((SByte*)(pFld1)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.Arm64.NegateSaturateScalar(test._fld1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.Arm64.NegateSaturateScalar( AdvSimd.LoadVector64((SByte*)(&test._fld1)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector64<SByte> op1, void* result, [CallerMemberName] string method = "") { SByte[] inArray1 = new SByte[Op1ElementCount]; SByte[] outArray = new SByte[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref inArray1[0]), op1); Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<SByte>>()); ValidateResult(inArray1, outArray, method); } private void ValidateResult(void* op1, void* result, [CallerMemberName] string method = "") { SByte[] inArray1 = new SByte[Op1ElementCount]; SByte[] outArray = new SByte[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector64<SByte>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<SByte>>()); ValidateResult(inArray1, outArray, method); } private void ValidateResult(SByte[] firstOp, SByte[] result, [CallerMemberName] string method = "") { bool succeeded = true; if (Helpers.NegateSaturate(firstOp[0]) != result[0]) { succeeded = false; } else { for (var i = 1; i < RetElementCount; i++) { if (result[i] != 0) { succeeded = false; break; } } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd.Arm64)}.{nameof(AdvSimd.Arm64.NegateSaturateScalar)}<SByte>(Vector64<SByte>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void NegateSaturateScalar_Vector64_SByte() { var test = new SimpleUnaryOpTest__NegateSaturateScalar_Vector64_SByte(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleUnaryOpTest__NegateSaturateScalar_Vector64_SByte { private struct DataTable { private byte[] inArray1; private byte[] outArray; private GCHandle inHandle1; private GCHandle outHandle; private ulong alignment; public DataTable(SByte[] inArray1, SByte[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<SByte>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<SByte>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; 
this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<SByte, byte>(ref inArray1[0]), (uint)sizeOfinArray1); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector64<SByte> _fld1; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = SByte.MinValue; } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<SByte>, byte>(ref testStruct._fld1), ref Unsafe.As<SByte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<SByte>>()); return testStruct; } public void RunStructFldScenario(SimpleUnaryOpTest__NegateSaturateScalar_Vector64_SByte testClass) { var result = AdvSimd.Arm64.NegateSaturateScalar(_fld1); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(SimpleUnaryOpTest__NegateSaturateScalar_Vector64_SByte testClass) { fixed (Vector64<SByte>* pFld1 = &_fld1) { var result = AdvSimd.Arm64.NegateSaturateScalar( AdvSimd.LoadVector64((SByte*)(pFld1)) ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 8; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<SByte>>() / sizeof(SByte); private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<SByte>>() / sizeof(SByte); private static SByte[] _data1 = new SByte[Op1ElementCount]; private static Vector64<SByte> _clsVar1; private Vector64<SByte> _fld1; private DataTable _dataTable; static SimpleUnaryOpTest__NegateSaturateScalar_Vector64_SByte() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = SByte.MinValue; } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<SByte>, byte>(ref _clsVar1), ref Unsafe.As<SByte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<SByte>>()); } public SimpleUnaryOpTest__NegateSaturateScalar_Vector64_SByte() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = SByte.MinValue; } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<SByte>, byte>(ref _fld1), ref Unsafe.As<SByte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<SByte>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = SByte.MinValue; } _dataTable = new DataTable(_data1, new SByte[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.Arm64.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.Arm64.NegateSaturateScalar( Unsafe.Read<Vector64<SByte>>(_dataTable.inArray1Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = 
AdvSimd.Arm64.NegateSaturateScalar( AdvSimd.LoadVector64((SByte*)(_dataTable.inArray1Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd.Arm64).GetMethod(nameof(AdvSimd.Arm64.NegateSaturateScalar), new Type[] { typeof(Vector64<SByte>) }) .Invoke(null, new object[] { Unsafe.Read<Vector64<SByte>>(_dataTable.inArray1Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<SByte>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd.Arm64).GetMethod(nameof(AdvSimd.Arm64.NegateSaturateScalar), new Type[] { typeof(Vector64<SByte>) }) .Invoke(null, new object[] { AdvSimd.LoadVector64((SByte*)(_dataTable.inArray1Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<SByte>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.Arm64.NegateSaturateScalar( _clsVar1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector64<SByte>* pClsVar1 = &_clsVar1) { var result = AdvSimd.Arm64.NegateSaturateScalar( AdvSimd.LoadVector64((SByte*)(pClsVar1)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector64<SByte>>(_dataTable.inArray1Ptr); var result = AdvSimd.Arm64.NegateSaturateScalar(op1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector64((SByte*)(_dataTable.inArray1Ptr)); var result = AdvSimd.Arm64.NegateSaturateScalar(op1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleUnaryOpTest__NegateSaturateScalar_Vector64_SByte(); var result = AdvSimd.Arm64.NegateSaturateScalar(test._fld1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new SimpleUnaryOpTest__NegateSaturateScalar_Vector64_SByte(); fixed (Vector64<SByte>* pFld1 = &test._fld1) { var result = AdvSimd.Arm64.NegateSaturateScalar( AdvSimd.LoadVector64((SByte*)(pFld1)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.Arm64.NegateSaturateScalar(_fld1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector64<SByte>* pFld1 = &_fld1) { var result = AdvSimd.Arm64.NegateSaturateScalar( AdvSimd.LoadVector64((SByte*)(pFld1)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.Arm64.NegateSaturateScalar(test._fld1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.Arm64.NegateSaturateScalar( AdvSimd.LoadVector64((SByte*)(&test._fld1)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector64<SByte> op1, void* result, [CallerMemberName] string method = "") { SByte[] inArray1 = new SByte[Op1ElementCount]; SByte[] outArray = new SByte[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref inArray1[0]), op1); Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<SByte>>()); ValidateResult(inArray1, outArray, method); } private void ValidateResult(void* op1, void* result, [CallerMemberName] string method = "") { SByte[] inArray1 = new SByte[Op1ElementCount]; SByte[] outArray = new SByte[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector64<SByte>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<SByte>>()); ValidateResult(inArray1, outArray, method); } private void ValidateResult(SByte[] firstOp, SByte[] result, [CallerMemberName] string method = "") { bool succeeded = true; if (Helpers.NegateSaturate(firstOp[0]) != result[0]) { succeeded = false; } else { for (var i = 1; i < RetElementCount; i++) { if (result[i] != 0) { succeeded = false; break; } } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd.Arm64)}.{nameof(AdvSimd.Arm64.NegateSaturateScalar)}<SByte>(Vector64<SByte>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
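The validation in the test above compares lane 0 against `Helpers.NegateSaturate(firstOp[0])` and expects the remaining lanes of the scalar result to be zero. As a rough aid, here is a minimal C# sketch of the saturating-negate semantics that helper is presumably checking; the helper name comes from the test, but this reimplementation is hypothetical and mirrors the AArch64 saturating-negate behaviour rather than the repository's own `Helpers` code.

```c#
using System;

// Hypothetical reference for what Helpers.NegateSaturate is expected to compute per sbyte
// lane: ordinary negation, except that negating sbyte.MinValue saturates to sbyte.MaxValue
// instead of wrapping back to sbyte.MinValue.
static sbyte NegateSaturate(sbyte value) =>
    value == sbyte.MinValue ? sbyte.MaxValue : (sbyte)(-value);

Console.WriteLine(NegateSaturate(sbyte.MinValue)); // 127 - the value lane 0 is validated against,
                                                   // since the test fills the input with SByte.MinValue
Console.WriteLine(NegateSaturate((sbyte)5));       // -5
```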
-1
dotnet/runtime
66,479
Add metrics for caching
Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
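As an illustration of the API surface described above, here is a minimal usage sketch. It assumes the Microsoft.Extensions.Caching.Memory package version that contains the statistics support from this PR (`TrackStatistics` and `GetCurrentStatistics`); the key names and the hit/miss sequence are made up for the example.

```c#
using System;
using Microsoft.Extensions.Caching.Memory;

// MemoryCacheOptions implements IOptions<MemoryCacheOptions>, so it can be passed directly.
var cache = new MemoryCache(new MemoryCacheOptions { TrackStatistics = true });

cache.Set("answer", 42);
cache.TryGetValue("answer", out object _);   // counted as a hit
cache.TryGetValue("missing", out object _);  // counted as a miss

var stats = cache.GetCurrentStatistics();
Console.WriteLine($"Hits={stats?.TotalHits}, Misses={stats?.TotalMisses}, " +
                  $"Entries={stats?.CurrentEntryCount}, EstimatedSize={stats?.CurrentEstimatedSize}");
```

Note that, per the description above, `CurrentEstimatedSize` is an eventually consistent estimate and may lag the dictionary-backed entry count while the cache is under heavy concurrent load.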
maryamariyan
2022-03-10T21:30:00Z
2022-04-08T18:16:17Z
51afa067be9f0d00dac2c455ceb78e38eef1e239
cef4ae17d91c278880f45f017e4272afe1f379b3
Add metrics for caching. Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
./src/libraries/Microsoft.Extensions.Configuration/src/StreamConfigurationProvider.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.IO; namespace Microsoft.Extensions.Configuration { /// <summary> /// Stream based configuration provider /// </summary> public abstract class StreamConfigurationProvider : ConfigurationProvider { /// <summary> /// The source settings for this provider. /// </summary> public StreamConfigurationSource Source { get; } private bool _loaded; /// <summary> /// Constructor. /// </summary> /// <param name="source">The source.</param> public StreamConfigurationProvider(StreamConfigurationSource source!!) { Source = source; } /// <summary> /// Load the configuration data from the stream. /// </summary> /// <param name="stream">The data stream.</param> public abstract void Load(Stream stream); /// <summary> /// Load the configuration data from the stream. Will throw after the first call. /// </summary> public override void Load() { if (_loaded) { throw new InvalidOperationException(SR.StreamConfigurationProvidersAlreadyLoaded); } if (Source.Stream == null) { throw new InvalidOperationException(SR.StreamConfigurationSourceStreamCannotBeNull); } Load(Source.Stream); _loaded = true; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.IO; namespace Microsoft.Extensions.Configuration { /// <summary> /// Stream based configuration provider /// </summary> public abstract class StreamConfigurationProvider : ConfigurationProvider { /// <summary> /// The source settings for this provider. /// </summary> public StreamConfigurationSource Source { get; } private bool _loaded; /// <summary> /// Constructor. /// </summary> /// <param name="source">The source.</param> public StreamConfigurationProvider(StreamConfigurationSource source!!) { Source = source; } /// <summary> /// Load the configuration data from the stream. /// </summary> /// <param name="stream">The data stream.</param> public abstract void Load(Stream stream); /// <summary> /// Load the configuration data from the stream. Will throw after the first call. /// </summary> public override void Load() { if (_loaded) { throw new InvalidOperationException(SR.StreamConfigurationProvidersAlreadyLoaded); } if (Source.Stream == null) { throw new InvalidOperationException(SR.StreamConfigurationSourceStreamCannotBeNull); } Load(Source.Stream); _loaded = true; } } }
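To illustrate how the abstract `Load(Stream)` shown above is meant to be used, here is a hypothetical minimal provider that derives from `StreamConfigurationProvider` and parses simple `key=value` lines. It is not part of this PR, and the matching `StreamConfigurationSource` subclass whose `Build()` would return it is assumed but omitted; recall that the base class's parameterless `Load()` hands `Source.Stream` to `Load(Stream)` exactly once and throws on any subsequent call.

```c#
using System.IO;
using Microsoft.Extensions.Configuration;

// Hypothetical example: a provider that reads "key=value" lines from the configured stream.
public class KeyValueStreamConfigurationProvider : StreamConfigurationProvider
{
    public KeyValueStreamConfigurationProvider(StreamConfigurationSource source)
        : base(source)
    {
    }

    public override void Load(Stream stream)
    {
        using (var reader = new StreamReader(stream))
        {
            string line;
            while ((line = reader.ReadLine()) != null)
            {
                int separator = line.IndexOf('=');
                if (separator > 0)
                {
                    // Data is the dictionary supplied by the ConfigurationProvider base class.
                    Data[line.Substring(0, separator).Trim()] = line.Substring(separator + 1).Trim();
                }
            }
        }
    }
}
```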
-1
dotnet/runtime
66,479
Add metrics for caching
Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
maryamariyan
2022-03-10T21:30:00Z
2022-04-08T18:16:17Z
51afa067be9f0d00dac2c455ceb78e38eef1e239
cef4ae17d91c278880f45f017e4272afe1f379b3
Add metrics for caching. Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
./src/tests/JIT/Generics/Parameters/instance_equalnull_struct01.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> </PropertyGroup> <ItemGroup> <Compile Include="instance_equalnull_struct01.cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> </PropertyGroup> <ItemGroup> <Compile Include="instance_equalnull_struct01.cs" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,479
Add metrics for caching
Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
maryamariyan
2022-03-10T21:30:00Z
2022-04-08T18:16:17Z
51afa067be9f0d00dac2c455ceb78e38eef1e239
cef4ae17d91c278880f45f017e4272afe1f379b3
Add metrics for caching. Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
./src/tests/Interop/PInvoke/Generics/GenericsNative.Point3B.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include <stdio.h> #include <stdint.h> #include <xplatform.h> #include <platformdefines.h> struct Point3B { bool e00; bool e01; bool e02; }; static Point3B Point3BValue = { }; extern "C" DLL_EXPORT Point3B STDMETHODCALLTYPE GetPoint3B(bool e00, bool e01, bool e02) { throw "P/Invoke for Point3<bool> should be unsupported."; } extern "C" DLL_EXPORT void STDMETHODCALLTYPE GetPoint3BOut(bool e00, bool e01, bool e02, Point3B* pValue) { throw "P/Invoke for Point3<bool> should be unsupported."; } extern "C" DLL_EXPORT const Point3B* STDMETHODCALLTYPE GetPoint3BPtr(bool e00, bool e01, bool e02) { throw "P/Invoke for Point3<bool> should be unsupported."; } extern "C" DLL_EXPORT Point3B STDMETHODCALLTYPE AddPoint3B(Point3B lhs, Point3B rhs) { throw "P/Invoke for Point3<bool> should be unsupported."; } extern "C" DLL_EXPORT Point3B STDMETHODCALLTYPE AddPoint3Bs(const Point3B* pValues, uint32_t count) { throw "P/Invoke for Point3<bool> should be unsupported."; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include <stdio.h> #include <stdint.h> #include <xplatform.h> #include <platformdefines.h> struct Point3B { bool e00; bool e01; bool e02; }; static Point3B Point3BValue = { }; extern "C" DLL_EXPORT Point3B STDMETHODCALLTYPE GetPoint3B(bool e00, bool e01, bool e02) { throw "P/Invoke for Point3<bool> should be unsupported."; } extern "C" DLL_EXPORT void STDMETHODCALLTYPE GetPoint3BOut(bool e00, bool e01, bool e02, Point3B* pValue) { throw "P/Invoke for Point3<bool> should be unsupported."; } extern "C" DLL_EXPORT const Point3B* STDMETHODCALLTYPE GetPoint3BPtr(bool e00, bool e01, bool e02) { throw "P/Invoke for Point3<bool> should be unsupported."; } extern "C" DLL_EXPORT Point3B STDMETHODCALLTYPE AddPoint3B(Point3B lhs, Point3B rhs) { throw "P/Invoke for Point3<bool> should be unsupported."; } extern "C" DLL_EXPORT Point3B STDMETHODCALLTYPE AddPoint3Bs(const Point3B* pValues, uint32_t count) { throw "P/Invoke for Point3<bool> should be unsupported."; }
-1
dotnet/runtime
66,479
Add metrics for caching
Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
maryamariyan
2022-03-10T21:30:00Z
2022-04-08T18:16:17Z
51afa067be9f0d00dac2c455ceb78e38eef1e239
cef4ae17d91c278880f45f017e4272afe1f379b3
Add metrics for caching. Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
./src/tests/JIT/Regression/JitBlue/Runtime_57640/Runtime_57640.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // Generated by Fuzzlyn v1.2 on 2021-08-15 23:15:19 // Run on .NET 6.0.0-dev on Arm Linux // Seed: 18219619158927602726 // Reduced from 82.6 KiB to 0.3 KiB in 00:02:54 // Debug: Outputs 14270 // Release: Outputs 4294953026 public class Runtime_57640 { static long[] s_28 = new long[]{1}; public static int Main() { bool correct = true; var vr10 = s_28[0]; for (int vr13 = 0; vr13 < 2; vr13++) { uint vr12 = (uint)(0 - (-14270 * vr10)); correct &= vr12 == 14270; } return correct ? 100 : -1; } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // Generated by Fuzzlyn v1.2 on 2021-08-15 23:15:19 // Run on .NET 6.0.0-dev on Arm Linux // Seed: 18219619158927602726 // Reduced from 82.6 KiB to 0.3 KiB in 00:02:54 // Debug: Outputs 14270 // Release: Outputs 4294953026 public class Runtime_57640 { static long[] s_28 = new long[]{1}; public static int Main() { bool correct = true; var vr10 = s_28[0]; for (int vr13 = 0; vr13 < 2; vr13++) { uint vr12 = (uint)(0 - (-14270 * vr10)); correct &= vr12 == 14270; } return correct ? 100 : -1; } }
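A small sketch (not part of the test) of the arithmetic behind the Debug/Release numbers in the comments above; the observation that the bad release output is the 32-bit two's-complement pattern of -14270 follows directly from the values, while any claim about exactly which JIT transformation caused it is left to the regression test itself.

```c#
using System;

// The expression from the test, with vr10 == 1: -14270 * 1 is -14270 as a long,
// 0 - (-14270) is 14270, and narrowing 14270 to uint leaves it at 14270.
long vr10 = 1;
Console.WriteLine((uint)(0 - (-14270 * vr10)));   // 14270 - the expected (debug) result

// 4294953026 from the release comment is 0xFFFFC842, i.e. 2^32 - 14270: the 32-bit
// two's-complement pattern of -14270, consistent with the re-negation being lost.
Console.WriteLine(unchecked((uint)-14270));       // 4294953026
```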
-1
dotnet/runtime
66,479
Add metrics for caching
Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
maryamariyan
2022-03-10T21:30:00Z
2022-04-08T18:16:17Z
51afa067be9f0d00dac2c455ceb78e38eef1e239
cef4ae17d91c278880f45f017e4272afe1f379b3
Add metrics for caching. Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
./src/tests/JIT/Regression/VS-ia64-JIT/V2.0-Beta2/b302558/_aopst1l.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // using System; using System.Collections; using System.Runtime.InteropServices; public enum TestEnum { red = 1, green = 2, blue = 4, } [StructLayout(LayoutKind.Sequential)] public struct AA { public bool[, ,] m_abField1; public String Method1(ushort param1, short[,] param2, bool param3) { return ((String)(((object)((param1 /= param1))))); } public static Array[][, ,][][, , ,] Static8() { bool[] local39 = new bool[5] { true, true, true, false, false }; { uint local40 = 65u; #pragma warning disable 253 for (App.m_sbyFwd10 += 49; (new AA().Method1(((ushort)(60u)), (new short[local40, local40]), false) != ((object)(((short)(62.0f))))); App. #pragma warning disable 1717,0162 m_dblFwd11 = App.m_dblFwd11) #pragma warning restore 1717,0162 #pragma warning restore 253 { #pragma warning disable 219 long local41 = ((long)(109.0f)); #pragma warning restore 219 return new Array[][, ,][][,,,]{(new Array[local40, local40, local40][][,,,]) }; } local39[23] = true; #pragma warning disable 162 throw new InvalidOperationException(); } return ((Array[][, ,][][, , ,])(((Array)(null)))); #pragma warning restore 162 } } public class App { static int Main() { try { Console.WriteLine("Testing AA::Static8"); AA.Static8(); } catch (Exception x) { Console.WriteLine("Exception handled: " + x.ToString()); } Console.WriteLine("Passed."); return 100; } public static char m_chFwd1; public static short m_shFwd2; public static String[,][][] m_axFwd3; public static String m_xFwd4; public static int m_iFwd5; public static double[, , ,] m_adblFwd6; public static uint m_uFwd7; public static ulong m_ulFwd8; public static short[,][, ,][] m_ashFwd9; public static sbyte m_sbyFwd10; public static double m_dblFwd11; public static bool m_bFwd12; public static ushort[] m_aushFwd13; public static byte m_byFwd14; public static float m_fFwd15; public static ushort m_ushFwd16; public static long m_lFwd17; public static ulong[] m_aulFwd18; public static ushort[,][,][][] m_aushFwd19; public static char[] m_achFwd20; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // using System; using System.Collections; using System.Runtime.InteropServices; public enum TestEnum { red = 1, green = 2, blue = 4, } [StructLayout(LayoutKind.Sequential)] public struct AA { public bool[, ,] m_abField1; public String Method1(ushort param1, short[,] param2, bool param3) { return ((String)(((object)((param1 /= param1))))); } public static Array[][, ,][][, , ,] Static8() { bool[] local39 = new bool[5] { true, true, true, false, false }; { uint local40 = 65u; #pragma warning disable 253 for (App.m_sbyFwd10 += 49; (new AA().Method1(((ushort)(60u)), (new short[local40, local40]), false) != ((object)(((short)(62.0f))))); App. #pragma warning disable 1717,0162 m_dblFwd11 = App.m_dblFwd11) #pragma warning restore 1717,0162 #pragma warning restore 253 { #pragma warning disable 219 long local41 = ((long)(109.0f)); #pragma warning restore 219 return new Array[][, ,][][,,,]{(new Array[local40, local40, local40][][,,,]) }; } local39[23] = true; #pragma warning disable 162 throw new InvalidOperationException(); } return ((Array[][, ,][][, , ,])(((Array)(null)))); #pragma warning restore 162 } } public class App { static int Main() { try { Console.WriteLine("Testing AA::Static8"); AA.Static8(); } catch (Exception x) { Console.WriteLine("Exception handled: " + x.ToString()); } Console.WriteLine("Passed."); return 100; } public static char m_chFwd1; public static short m_shFwd2; public static String[,][][] m_axFwd3; public static String m_xFwd4; public static int m_iFwd5; public static double[, , ,] m_adblFwd6; public static uint m_uFwd7; public static ulong m_ulFwd8; public static short[,][, ,][] m_ashFwd9; public static sbyte m_sbyFwd10; public static double m_dblFwd11; public static bool m_bFwd12; public static ushort[] m_aushFwd13; public static byte m_byFwd14; public static float m_fFwd15; public static ushort m_ushFwd16; public static long m_lFwd17; public static ulong[] m_aulFwd18; public static ushort[,][,][][] m_aushFwd19; public static char[] m_achFwd20; }
-1
dotnet/runtime
66,479
Add metrics for caching
Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
maryamariyan
2022-03-10T21:30:00Z
2022-04-08T18:16:17Z
51afa067be9f0d00dac2c455ceb78e38eef1e239
cef4ae17d91c278880f45f017e4272afe1f379b3
Add metrics for caching. Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
./src/tests/JIT/Methodical/Invoke/implicit/iu2.il
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern mscorlib { } .assembly extern System.Console { .publickeytoken = (B0 3F 5F 7F 11 D5 0A 3A ) .ver 4:0:0:0 } .assembly 'iu2' { // .custom instance void ['mscorlib']System.Diagnostics.DebuggableAttribute::.ctor(bool, // bool) = ( 01 00 00 01 00 00 ) } .assembly extern xunit.core {} // MVID: {37F05BF1-E8CC-42FA-A268-D866F07CFFF8} .namespace TestCase { .class private auto ansi Test extends ['mscorlib']System.Object { .method private hidebysig int32 compare(unsigned int16 arg1, unsigned int16 arg2) il managed { // Code size 26 (0x1a) .maxstack 2 .locals (int32 V_0) IL_0000: ldarg.1 IL_0002: ldarg.2 IL_0004: bge.s IL_000a IL_0006: ldc.i4.m1 IL_0007: stloc.0 IL_0008: br.s IL_0018 IL_000a: ldarg.1 IL_000c: ldarg.2 IL_000e: ble.s IL_0014 IL_0010: ldc.i4.m1 IL_0011: stloc.0 IL_0012: br.s IL_0018 IL_0014: ldc.i4.0 IL_0015: stloc.0 IL_0016: br.s IL_0018 IL_0018: ldloc.0 IL_0019: ret } // end of method 'Test::compare' .method private hidebysig static int32 Main() il managed { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint // Code size 39 (0x27) .maxstack 3 .locals (int32 V_0) newobj instance void TestCase.Test::.ctor() IL_0000: ldc.i4 0xfffff2 conv.i IL_0001: ldc.i4 0xfff2 conv.i IL_0002: call instance int32 TestCase.Test::compare(unsigned int16, unsigned int16) IL_0007: brfalse.s IL_0017 IL_0009: ldstr "FAILED" IL_000e: call void [System.Console]System.Console::WriteLine(class System.String) IL_0013: ldc.i4.1 IL_0014: stloc.0 IL_0015: br.s IL_0025 IL_0017: ldstr "PASSED" IL_001c: call void [System.Console]System.Console::WriteLine(class System.String) IL_0021: ldc.i4 0x64 IL_0022: stloc.0 IL_0023: br.s IL_0025 IL_0025: ldloc.0 IL_0026: ret } // end of method 'Test::Main' .method public hidebysig specialname rtspecialname instance void .ctor() il managed { // Code size 7 (0x7) .maxstack 8 IL_0000: ldarg.0 IL_0001: call instance void ['mscorlib']System.Object::.ctor() IL_0006: ret } // end of method 'Test::.ctor' } // end of class 'Test' } // end of namespace 'TestCase' //*********** DISASSEMBLY COMPLETE ***********************
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern mscorlib { } .assembly extern System.Console { .publickeytoken = (B0 3F 5F 7F 11 D5 0A 3A ) .ver 4:0:0:0 } .assembly 'iu2' { // .custom instance void ['mscorlib']System.Diagnostics.DebuggableAttribute::.ctor(bool, // bool) = ( 01 00 00 01 00 00 ) } .assembly extern xunit.core {} // MVID: {37F05BF1-E8CC-42FA-A268-D866F07CFFF8} .namespace TestCase { .class private auto ansi Test extends ['mscorlib']System.Object { .method private hidebysig int32 compare(unsigned int16 arg1, unsigned int16 arg2) il managed { // Code size 26 (0x1a) .maxstack 2 .locals (int32 V_0) IL_0000: ldarg.1 IL_0002: ldarg.2 IL_0004: bge.s IL_000a IL_0006: ldc.i4.m1 IL_0007: stloc.0 IL_0008: br.s IL_0018 IL_000a: ldarg.1 IL_000c: ldarg.2 IL_000e: ble.s IL_0014 IL_0010: ldc.i4.m1 IL_0011: stloc.0 IL_0012: br.s IL_0018 IL_0014: ldc.i4.0 IL_0015: stloc.0 IL_0016: br.s IL_0018 IL_0018: ldloc.0 IL_0019: ret } // end of method 'Test::compare' .method private hidebysig static int32 Main() il managed { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint // Code size 39 (0x27) .maxstack 3 .locals (int32 V_0) newobj instance void TestCase.Test::.ctor() IL_0000: ldc.i4 0xfffff2 conv.i IL_0001: ldc.i4 0xfff2 conv.i IL_0002: call instance int32 TestCase.Test::compare(unsigned int16, unsigned int16) IL_0007: brfalse.s IL_0017 IL_0009: ldstr "FAILED" IL_000e: call void [System.Console]System.Console::WriteLine(class System.String) IL_0013: ldc.i4.1 IL_0014: stloc.0 IL_0015: br.s IL_0025 IL_0017: ldstr "PASSED" IL_001c: call void [System.Console]System.Console::WriteLine(class System.String) IL_0021: ldc.i4 0x64 IL_0022: stloc.0 IL_0023: br.s IL_0025 IL_0025: ldloc.0 IL_0026: ret } // end of method 'Test::Main' .method public hidebysig specialname rtspecialname instance void .ctor() il managed { // Code size 7 (0x7) .maxstack 8 IL_0000: ldarg.0 IL_0001: call instance void ['mscorlib']System.Object::.ctor() IL_0006: ret } // end of method 'Test::.ctor' } // end of class 'Test' } // end of namespace 'TestCase' //*********** DISASSEMBLY COMPLETE ***********************
-1
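The pr_description field in the record above quotes the statistics surface this PR adds (TrackStatistics, GetCurrentStatistics, MemoryCacheStatistics). As a reading aid, here is a minimal consumer sketch of that surface; it is not taken from the PR's own tests, and the key names, the SizeLimit value, and the entry sizes are illustrative assumptions.

```c#
// Minimal sketch (not from the PR) of consuming the statistics API quoted in
// the pr_description above. Assumes Microsoft.Extensions.Caching.Memory 7.0+.
using System;
using Microsoft.Extensions.Caching.Memory;

var cache = new MemoryCache(new MemoryCacheOptions
{
    TrackStatistics = true, // statistics are opt-in
    SizeLimit = 100         // illustrative; lets CurrentEstimatedSize report a value
});

cache.Set("answer", 42, new MemoryCacheEntryOptions { Size = 1 });
_ = cache.TryGetValue("answer", out _);  // counted as a hit
_ = cache.TryGetValue("missing", out _); // counted as a miss

MemoryCacheStatistics? stats = cache.GetCurrentStatistics();
if (stats is not null)
{
    Console.WriteLine(
        $"hits={stats.TotalHits} misses={stats.TotalMisses} " +
        $"entries={stats.CurrentEntryCount} size={stats.CurrentEstimatedSize}");
}
```

Without TrackStatistics = true the call returns null, which is also the behaviour of the default IMemoryCache interface implementation quoted in the description.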
dotnet/runtime
66,479
Add metrics for caching
Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
maryamariyan
2022-03-10T21:30:00Z
2022-04-08T18:16:17Z
51afa067be9f0d00dac2c455ceb78e38eef1e239
cef4ae17d91c278880f45f017e4272afe1f379b3
Add metrics for caching. Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
./src/tests/JIT/Methodical/eh/basics/trycatchtrycatch.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; namespace hello { class Class1 { private static TestUtil.TestLog testLog; static Class1() { // Create test writer object to hold expected output System.IO.StringWriter expectedOut = new System.IO.StringWriter(); // Write expected output to string writer object expectedOut.WriteLine("In try"); expectedOut.WriteLine("In try"); // Create and initialize test log object testLog = new TestUtil.TestLog(expectedOut); } static public void inTry() { Console.WriteLine("In try"); } static public void inCatch() { Console.WriteLine("In catch"); } static public void inFinally() { Console.WriteLine("In finally"); } static public int Main() { //Start recording testLog.StartRecording(); try { inTry(); } catch (Exception e) { Console.WriteLine(e); inCatch(); } try { inTry(); } catch (Exception e) { Console.WriteLine(e); inCatch(); } // stop recoding testLog.StopRecording(); return testLog.VerifyOutput(); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; namespace hello { class Class1 { private static TestUtil.TestLog testLog; static Class1() { // Create test writer object to hold expected output System.IO.StringWriter expectedOut = new System.IO.StringWriter(); // Write expected output to string writer object expectedOut.WriteLine("In try"); expectedOut.WriteLine("In try"); // Create and initialize test log object testLog = new TestUtil.TestLog(expectedOut); } static public void inTry() { Console.WriteLine("In try"); } static public void inCatch() { Console.WriteLine("In catch"); } static public void inFinally() { Console.WriteLine("In finally"); } static public int Main() { //Start recording testLog.StartRecording(); try { inTry(); } catch (Exception e) { Console.WriteLine(e); inCatch(); } try { inTry(); } catch (Exception e) { Console.WriteLine(e); inCatch(); } // stop recoding testLog.StopRecording(); return testLog.VerifyOutput(); } } }
-1
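The same pr_description notes that the reported size can drift from the backing entry count while concurrent writes are in flight and only settles once the load drains. The sketch below illustrates that usage pattern; it is not the test case the PR adds, and the task count, key shape, and sizes are made-up values.

```c#
// Illustrative only: hammer the cache from several tasks, then read the
// statistics after the writers have finished, when count and size agree again.
using System;
using System.Linq;
using System.Threading.Tasks;
using Microsoft.Extensions.Caching.Memory;

var cache = new MemoryCache(new MemoryCacheOptions
{
    TrackStatistics = true,
    SizeLimit = 10_000 // illustrative; large enough that nothing is evicted
});

await Task.WhenAll(Enumerable.Range(0, 8).Select(writer => Task.Run(() =>
{
    for (int i = 0; i < 500; i++)
    {
        cache.Set($"{writer}:{i}", i, new MemoryCacheEntryOptions { Size = 1 });
    }
})));

// 8 writers x 500 unique keys of size 1 => both values read 4000 once the load has drained.
MemoryCacheStatistics? stats = cache.GetCurrentStatistics();
Console.WriteLine($"entries={stats?.CurrentEntryCount} size={stats?.CurrentEstimatedSize}");
```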
dotnet/runtime
66,479
Add metrics for caching
Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
maryamariyan
2022-03-10T21:30:00Z
2022-04-08T18:16:17Z
51afa067be9f0d00dac2c455ceb78e38eef1e239
cef4ae17d91c278880f45f017e4272afe1f379b3
Add metrics for caching. Fixes https://github.com/dotnet/runtime/issues/50406 Adds the approved APIs: ```c# public partial class MemoryCache { /// <summary> /// Gets a snapshot of the current statistics for the memory cache. /// </summary> public MemoryCacheStatistics? GetCurrentStatistics() { throw null; } } public partial interface IMemoryCache { #if NET_7_OR_GREATER /// <summary> /// Gets a snapshot of the cache statistics if available. /// </summary> /// <returns>An instance of the <see cref="MemoryCacheStatistics"/> instance.</returns> public MemoryCacheStatistics? GetCurrentStatistics() => null; #endif } public partial class MemoryCacheStatistics { public MemoryCacheStatistics() {} public long TotalMisses { get; init; } public long TotalHits { get; init; } public long CurrentEntryCount { get; init; } public long? CurrentEstimatedSize { get; init; } } ``` Also: ```diff public partial class MemoryCacheOptions : Microsoft.Extensions.Options.IOptions<Microsoft.Extensions.Caching.Memory.MemoryCacheOptions> { + public bool TrackStatistics { get { throw null; } set { } } } ``` In this implementation, the Size is not in sync with the backing count from CoherentState's concurrent dictionary count, which means that the snapshot of the statistics may be out of sync when taken during heavy multithreaded load is happening, but the values will eventually become consistent once the heavy load is finished. (refer to the last test case added) TODO: - The PR is ready for review, in the meantime I am going to make sure we are not causing regression for tech empower tests, and will share the results here --- ### Alternative Approaches - If we wanted to be completely accurate with the snapshot values, returning count/size that are in sync, then I also had prototyped this change https://github.com/dotnet/runtime/pull/66479/commits/a68d3e4b9494fbaec1be7d2609bf55ba725fe84d by adding a separate `_cacheCount` that would always remain in sync with `_cacheSize`. To keep things simple I decided to go with the easiest fix drafted here in the PR instead which just returns the estimated size. - Another approach that could be explored would be to use a thread static object (e.g. `ThreadStats` that keeps track of count/size). The actual count/size would be the aggregated value of `ThreadStats` count/size for all of the threads present. Disadvantage with using this thread static approach is that since that way we are keeping track of a list of threads, we need additional code to keep track of threads that have gone stale over time. We could explore this option too later if we learn later that the current solution in the PR is not sufficient or performant enough.
./src/tests/Loader/classloader/TypeGeneratorTests/TypeGeneratorTest1423/Generated1423.il
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern mscorlib { .publickeytoken = (B7 7A 5C 56 19 34 E0 89 ) .ver 4:0:0:0 } .assembly extern TestFramework { .publickeytoken = ( B0 3F 5F 7F 11 D5 0A 3A ) } //TYPES IN FORWARDER ASSEMBLIES: //TEST ASSEMBLY: .assembly Generated1423 { .hash algorithm 0x00008004 } .assembly extern xunit.core {} .class public BaseClass0 { .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void [mscorlib]System.Object::.ctor() ret } } .class public BaseClass1 extends BaseClass0 { .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void BaseClass0::.ctor() ret } } .class public G3_C1895`1<T0> extends class G2_C819`2<class BaseClass0,class BaseClass0> implements class IBase2`2<class BaseClass0,!T0> { .method public hidebysig virtual instance string Method7<M0>() cil managed noinlining { ldstr "G3_C1895::Method7.18589<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string 'IBase2<class BaseClass0,T0>.Method7'<M0>() cil managed noinlining { .override method instance string class IBase2`2<class BaseClass0,!T0>::Method7<[1]>() ldstr "G3_C1895::Method7.MI.18590<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string ClassMethod5187() cil managed noinlining { ldstr "G3_C1895::ClassMethod5187.18591()" ret } .method public hidebysig newslot virtual instance string ClassMethod5188() cil managed noinlining { ldstr "G3_C1895::ClassMethod5188.18592()" ret } .method public hidebysig newslot virtual instance string ClassMethod5189<M0>() cil managed noinlining { ldstr "G3_C1895::ClassMethod5189.18593<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string ClassMethod5190<M0>() cil managed noinlining { ldstr "G3_C1895::ClassMethod5190.18594<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string 'G2_C819<class BaseClass0,class BaseClass0>.ClassMethod3019'() cil managed noinlining { .override method instance string class G2_C819`2<class BaseClass0,class BaseClass0>::ClassMethod3019() ldstr "G3_C1895::ClassMethod3019.MI.18595()" ret } .method public hidebysig newslot virtual instance string 'G2_C819<class BaseClass0,class BaseClass0>.ClassMethod3020'() cil managed noinlining { .override method instance string class G2_C819`2<class BaseClass0,class BaseClass0>::ClassMethod3020() ldstr 
"G3_C1895::ClassMethod3020.MI.18596()" ret } .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void class G2_C819`2<class BaseClass0,class BaseClass0>::.ctor() ret } } .class public G2_C819`2<T0, T1> extends class G1_C15`2<class BaseClass0,!T1> implements IBase0, class IBase2`2<!T1,!T1> { .method public hidebysig newslot virtual instance string Method0() cil managed noinlining { ldstr "G2_C819::Method0.12544()" ret } .method public hidebysig newslot virtual instance string Method1() cil managed noinlining { ldstr "G2_C819::Method1.12545()" ret } .method public hidebysig newslot virtual instance string 'IBase0.Method1'() cil managed noinlining { .override method instance string IBase0::Method1() ldstr "G2_C819::Method1.MI.12546()" ret } .method public hidebysig virtual instance string Method2<M0>() cil managed noinlining { ldstr "G2_C819::Method2.12547<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig virtual instance string Method3<M0>() cil managed noinlining { ldstr "G2_C819::Method3.12548<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string 'IBase0.Method3'<M0>() cil managed noinlining { .override method instance string IBase0::Method3<[1]>() ldstr "G2_C819::Method3.MI.12549<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig virtual instance string Method7<M0>() cil managed noinlining { ldstr "G2_C819::Method7.12550<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string ClassMethod3019() cil managed noinlining { ldstr "G2_C819::ClassMethod3019.12551()" ret } .method public hidebysig newslot virtual instance string ClassMethod3020() cil managed noinlining { ldstr "G2_C819::ClassMethod3020.12552()" ret } .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void class G1_C15`2<class BaseClass0,!T1>::.ctor() ret } } .class interface public abstract IBase2`2<+T0, -T1> { .method public hidebysig newslot abstract virtual instance string Method7<M0>() cil managed { } } .class public abstract G1_C15`2<T0, T1> implements class IBase2`2<!T1,!T1>, class IBase1`1<class BaseClass0> { .method public hidebysig newslot virtual instance string Method7<M0>() cil managed noinlining { ldstr "G1_C15::Method7.4885<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret 
} .method public hidebysig newslot virtual instance string 'IBase2<T1,T1>.Method7'<M0>() cil managed noinlining { .override method instance string class IBase2`2<!T1,!T1>::Method7<[1]>() ldstr "G1_C15::Method7.MI.4886<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig virtual instance string Method4() cil managed noinlining { ldstr "G1_C15::Method4.4887()" ret } .method public hidebysig newslot virtual instance string 'IBase1<class BaseClass0>.Method4'() cil managed noinlining { .override method instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G1_C15::Method4.MI.4888()" ret } .method public hidebysig virtual instance string Method5() cil managed noinlining { ldstr "G1_C15::Method5.4889()" ret } .method public hidebysig newslot virtual instance string 'IBase1<class BaseClass0>.Method5'() cil managed noinlining { .override method instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G1_C15::Method5.MI.4890()" ret } .method public hidebysig virtual instance string Method6<M0>() cil managed noinlining { ldstr "G1_C15::Method6.4891<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string 'IBase1<class BaseClass0>.Method6'<M0>() cil managed noinlining { .override method instance string class IBase1`1<class BaseClass0>::Method6<[1]>() ldstr "G1_C15::Method6.MI.4892<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void [mscorlib]System.Object::.ctor() ret } } .class interface public abstract IBase0 { .method public hidebysig newslot abstract virtual instance string Method0() cil managed { } .method public hidebysig newslot abstract virtual instance string Method1() cil managed { } .method public hidebysig newslot abstract virtual instance string Method2<M0>() cil managed { } .method public hidebysig newslot abstract virtual instance string Method3<M0>() cil managed { } } .class interface public abstract IBase1`1<+T0> { .method public hidebysig newslot abstract virtual instance string Method4() cil managed { } .method public hidebysig newslot abstract virtual instance string Method5() cil managed { } .method public hidebysig newslot abstract virtual instance string Method6<M0>() cil managed { } } .class public auto ansi beforefieldinit Generated1423 { .method static void M.BaseClass0<(BaseClass0)W>(!!W inst, string exp) cil managed { .maxstack 5 .locals init (string[] actualResults) ldc.i4.s 0 newarr string stloc.s actualResults ldarg.1 ldstr "M.BaseClass0<(BaseClass0)W>(!!W inst, string exp)" ldc.i4.s 0 ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.BaseClass1<(BaseClass1)W>(!!W inst, string exp) cil managed { .maxstack 5 .locals init (string[] 
actualResults) ldc.i4.s 0 newarr string stloc.s actualResults ldarg.1 ldstr "M.BaseClass1<(BaseClass1)W>(!!W inst, string exp)" ldc.i4.s 0 ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G3_C1895.T<T0,(class G3_C1895`1<!!T0>)W>(!!W 'inst', string exp) cil managed { .maxstack 19 .locals init (string[] actualResults) ldc.i4.s 14 newarr string stloc.s actualResults ldarg.1 ldstr "M.G3_C1895.T<T0,(class G3_C1895`1<!!T0>)W>(!!W 'inst', string exp)" ldc.i4.s 14 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<!!T0>::ClassMethod3019() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<!!T0>::ClassMethod3020() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<!!T0>::ClassMethod5187() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<!!T0>::ClassMethod5188() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<!!T0>::ClassMethod5189<object>() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<!!T0>::ClassMethod5190<object>() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<!!T0>::Method0() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<!!T0>::Method1() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<!!T0>::Method2<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<!!T0>::Method3<object>() stelem.ref ldloc.s actualResults ldc.i4.s 10 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<!!T0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 11 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<!!T0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 12 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<!!T0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 13 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<!!T0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G3_C1895.A<(class G3_C1895`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 19 .locals init (string[] actualResults) ldc.i4.s 14 newarr string stloc.s actualResults ldarg.1 ldstr "M.G3_C1895.A<(class G3_C1895`1<class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 14 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass0>::ClassMethod3019() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass0>::ClassMethod3020() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass0>::ClassMethod5187() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. 
!!W callvirt instance string class G3_C1895`1<class BaseClass0>::ClassMethod5188() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass0>::ClassMethod5189<object>() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass0>::ClassMethod5190<object>() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass0>::Method0() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass0>::Method1() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass0>::Method2<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass0>::Method3<object>() stelem.ref ldloc.s actualResults ldc.i4.s 10 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 11 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 12 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 13 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G3_C1895.B<(class G3_C1895`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 19 .locals init (string[] actualResults) ldc.i4.s 14 newarr string stloc.s actualResults ldarg.1 ldstr "M.G3_C1895.B<(class G3_C1895`1<class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 14 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass1>::ClassMethod3019() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass1>::ClassMethod3020() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass1>::ClassMethod5187() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass1>::ClassMethod5188() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass1>::ClassMethod5189<object>() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass1>::ClassMethod5190<object>() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass1>::Method0() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass1>::Method1() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass1>::Method2<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. 
!!W callvirt instance string class G3_C1895`1<class BaseClass1>::Method3<object>() stelem.ref ldloc.s actualResults ldc.i4.s 10 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 11 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 12 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 13 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C819.T.T<T0,T1,(class G2_C819`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 15 .locals init (string[] actualResults) ldc.i4.s 10 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C819.T.T<T0,T1,(class G2_C819`2<!!T0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 10 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<!!T0,!!T1>::ClassMethod3019() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<!!T0,!!T1>::ClassMethod3020() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<!!T0,!!T1>::Method0() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<!!T0,!!T1>::Method1() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<!!T0,!!T1>::Method2<object>() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<!!T0,!!T1>::Method3<object>() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<!!T0,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<!!T0,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<!!T0,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<!!T0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C819.A.T<T1,(class G2_C819`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 15 .locals init (string[] actualResults) ldc.i4.s 10 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C819.A.T<T1,(class G2_C819`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 10 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,!!T1>::ClassMethod3019() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,!!T1>::ClassMethod3020() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,!!T1>::Method0() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. 
!!W callvirt instance string class G2_C819`2<class BaseClass0,!!T1>::Method1() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,!!T1>::Method2<object>() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,!!T1>::Method3<object>() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C819.A.A<(class G2_C819`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 15 .locals init (string[] actualResults) ldc.i4.s 10 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C819.A.A<(class G2_C819`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 10 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::ClassMethod3019() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::ClassMethod3020() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method0() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method1() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method2<object>() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method3<object>() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. 
!!W callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C819.A.B<(class G2_C819`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 15 .locals init (string[] actualResults) ldc.i4.s 10 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C819.A.B<(class G2_C819`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 10 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass1>::ClassMethod3019() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass1>::ClassMethod3020() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method0() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method1() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method2<object>() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method3<object>() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C819.B.T<T1,(class G2_C819`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 15 .locals init (string[] actualResults) ldc.i4.s 10 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C819.B.T<T1,(class G2_C819`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 10 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,!!T1>::ClassMethod3019() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,!!T1>::ClassMethod3020() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,!!T1>::Method0() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,!!T1>::Method1() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,!!T1>::Method2<object>() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. 
!!W callvirt instance string class G2_C819`2<class BaseClass1,!!T1>::Method3<object>() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C819.B.A<(class G2_C819`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 15 .locals init (string[] actualResults) ldc.i4.s 10 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C819.B.A<(class G2_C819`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 10 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass0>::ClassMethod3019() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass0>::ClassMethod3020() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method0() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method1() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method2<object>() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method3<object>() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C819.B.B<(class G2_C819`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 15 .locals init (string[] actualResults) ldc.i4.s 10 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C819.B.B<(class G2_C819`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 10 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass1>::ClassMethod3019() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. 
!!W callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass1>::ClassMethod3020() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method0() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method1() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method2<object>() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method3<object>() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<!!T0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. 
!!W callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass1,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C15.T.T<T0,T1,(class G1_C15`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 9 .locals init (string[] actualResults) ldc.i4.s 4 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C15.T.T<T0,T1,(class G1_C15`2<!!T0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 4 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<!!T0,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<!!T0,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<!!T0,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. 
!!W callvirt instance string class G1_C15`2<!!T0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C15.A.T<T1,(class G1_C15`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 9 .locals init (string[] actualResults) ldc.i4.s 4 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C15.A.T<T1,(class G1_C15`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 4 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass0,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass0,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass0,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C15.A.A<(class G1_C15`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 9 .locals init (string[] actualResults) ldc.i4.s 4 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C15.A.A<(class G1_C15`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 4 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C15.A.B<(class G1_C15`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 9 .locals init (string[] actualResults) ldc.i4.s 4 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C15.A.B<(class G1_C15`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 4 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. 
!!W callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C15.B.T<T1,(class G1_C15`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 9 .locals init (string[] actualResults) ldc.i4.s 4 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C15.B.T<T1,(class G1_C15`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 4 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass1,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass1,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass1,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass1,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C15.B.A<(class G1_C15`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 9 .locals init (string[] actualResults) ldc.i4.s 4 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C15.B.A<(class G1_C15`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 4 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C15.B.B<(class G1_C15`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 9 .locals init (string[] actualResults) ldc.i4.s 4 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C15.B.B<(class G1_C15`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 4 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. 
!!W callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase0<(IBase0)W>(!!W inst, string exp) cil managed { .maxstack 9 .locals init (string[] actualResults) ldc.i4.s 4 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase0<(IBase0)W>(!!W inst, string exp)" ldc.i4.s 4 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string IBase0::Method0() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string IBase0::Method1() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string IBase0::Method2<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string IBase0::Method3<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase1.T<T0,(class IBase1`1<!!T0>)W>(!!W 'inst', string exp) cil managed { .maxstack 8 .locals init (string[] actualResults) ldc.i4.s 3 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase1.T<T0,(class IBase1`1<!!T0>)W>(!!W 'inst', string exp)" ldc.i4.s 3 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<!!T0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<!!T0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<!!T0>::Method6<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase1.A<(class IBase1`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 8 .locals init (string[] actualResults) ldc.i4.s 3 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase1.A<(class IBase1`1<class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 3 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase1.B<(class IBase1`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 8 .locals init (string[] actualResults) ldc.i4.s 3 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase1.B<(class IBase1`1<class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 3 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. 
!!W callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method public hidebysig static void MethodCallingTest() cil managed { .maxstack 10 .locals init (object V_0) ldstr "========================== Method Calling Test ==========================" call void [mscorlib]System.Console::WriteLine(string) newobj instance void class G3_C1895`1<class BaseClass0>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C15`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G3_C1895::Method7.18589<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G3_C1895::Method7.MI.18590<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G3_C1895::Method7.MI.18590<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup 
castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::ClassMethod3020() ldstr "G3_C1895::ClassMethod3020.MI.18596()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::ClassMethod3019() ldstr "G3_C1895::ClassMethod3019.MI.18595()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G3_C1895::Method7.18589<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method3<object>() ldstr "G2_C819::Method3.12548<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method2<object>() ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method1() ldstr "G2_C819::Method1.12545()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method0() ldstr "G2_C819::Method0.12544()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method4() 
ldstr "G1_C15::Method4.4887()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string IBase0::Method0() ldstr "G2_C819::Method0.12544()" ldstr "IBase0 on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method1() ldstr "G2_C819::Method1.MI.12546()" ldstr "IBase0 on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method2<object>() ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "IBase0 on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method3<object>() ldstr "G2_C819::Method3.MI.12549<System.Object>()" ldstr "IBase0 on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G3_C1895`1<class BaseClass0> callvirt instance string class G3_C1895`1<class BaseClass0>::ClassMethod5190<object>() ldstr "G3_C1895::ClassMethod5190.18594<System.Object>()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass0> callvirt instance string class G3_C1895`1<class BaseClass0>::ClassMethod5189<object>() ldstr "G3_C1895::ClassMethod5189.18593<System.Object>()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass0> callvirt instance string class G3_C1895`1<class BaseClass0>::ClassMethod5188() ldstr "G3_C1895::ClassMethod5188.18592()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass0> callvirt instance string class G3_C1895`1<class BaseClass0>::ClassMethod5187() ldstr "G3_C1895::ClassMethod5187.18591()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass0> callvirt instance string class G3_C1895`1<class BaseClass0>::Method7<object>() ldstr "G3_C1895::Method7.18589<System.Object>()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass0> callvirt instance string class G3_C1895`1<class BaseClass0>::ClassMethod3020() ldstr "G3_C1895::ClassMethod3020.MI.18596()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass0> callvirt instance string class G3_C1895`1<class BaseClass0>::ClassMethod3019() ldstr "G3_C1895::ClassMethod3019.MI.18595()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass 
class G3_C1895`1<class BaseClass0> callvirt instance string class G3_C1895`1<class BaseClass0>::Method3<object>() ldstr "G2_C819::Method3.12548<System.Object>()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass0> callvirt instance string class G3_C1895`1<class BaseClass0>::Method2<object>() ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass0> callvirt instance string class G3_C1895`1<class BaseClass0>::Method1() ldstr "G2_C819::Method1.12545()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass0> callvirt instance string class G3_C1895`1<class BaseClass0>::Method0() ldstr "G2_C819::Method0.12544()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass0> callvirt instance string class G3_C1895`1<class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass0> callvirt instance string class G3_C1895`1<class BaseClass0>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass0> callvirt instance string class G3_C1895`1<class BaseClass0>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G3_C1895`1<class BaseClass1>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C15`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass0> callvirt instance string class 
G1_C15`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G3_C1895::Method7.18589<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G3_C1895::Method7.18589<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G3_C1895::Method7.MI.18590<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::ClassMethod3020() ldstr "G3_C1895::ClassMethod3020.MI.18596()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::ClassMethod3019() ldstr "G3_C1895::ClassMethod3019.MI.18595()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G3_C1895::Method7.18589<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method3<object>() ldstr "G2_C819::Method3.12548<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method2<object>() ldstr 
"G2_C819::Method2.12547<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method1() ldstr "G2_C819::Method1.12545()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method0() ldstr "G2_C819::Method0.12544()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string IBase0::Method0() ldstr "G2_C819::Method0.12544()" ldstr "IBase0 on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method1() ldstr "G2_C819::Method1.MI.12546()" ldstr "IBase0 on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method2<object>() ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "IBase0 on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method3<object>() ldstr "G2_C819::Method3.MI.12549<System.Object>()" ldstr "IBase0 on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G3_C1895`1<class BaseClass1> callvirt instance string class G3_C1895`1<class BaseClass1>::ClassMethod5190<object>() ldstr "G3_C1895::ClassMethod5190.18594<System.Object>()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass1> callvirt instance string class G3_C1895`1<class BaseClass1>::ClassMethod5189<object>() ldstr "G3_C1895::ClassMethod5189.18593<System.Object>()" ldstr 
"class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass1> callvirt instance string class G3_C1895`1<class BaseClass1>::ClassMethod5188() ldstr "G3_C1895::ClassMethod5188.18592()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass1> callvirt instance string class G3_C1895`1<class BaseClass1>::ClassMethod5187() ldstr "G3_C1895::ClassMethod5187.18591()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass1> callvirt instance string class G3_C1895`1<class BaseClass1>::Method7<object>() ldstr "G3_C1895::Method7.18589<System.Object>()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass1> callvirt instance string class G3_C1895`1<class BaseClass1>::ClassMethod3020() ldstr "G3_C1895::ClassMethod3020.MI.18596()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass1> callvirt instance string class G3_C1895`1<class BaseClass1>::ClassMethod3019() ldstr "G3_C1895::ClassMethod3019.MI.18595()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass1> callvirt instance string class G3_C1895`1<class BaseClass1>::Method3<object>() ldstr "G2_C819::Method3.12548<System.Object>()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass1> callvirt instance string class G3_C1895`1<class BaseClass1>::Method2<object>() ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass1> callvirt instance string class G3_C1895`1<class BaseClass1>::Method1() ldstr "G2_C819::Method1.12545()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass1> callvirt instance string class G3_C1895`1<class BaseClass1>::Method0() ldstr "G2_C819::Method0.12544()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass1> callvirt instance string class G3_C1895`1<class BaseClass1>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass1> callvirt instance 
string class G3_C1895`1<class BaseClass1>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass1> callvirt instance string class G3_C1895`1<class BaseClass1>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G2_C819`2<class BaseClass0,class BaseClass0>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C15`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class 
BaseClass1>::Method7<object>() ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::ClassMethod3020() ldstr "G2_C819::ClassMethod3020.12552()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::ClassMethod3019() ldstr "G2_C819::ClassMethod3019.12551()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method3<object>() ldstr "G2_C819::Method3.12548<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method2<object>() ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method1() ldstr "G2_C819::Method1.12545()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method0() ldstr "G2_C819::Method0.12544()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class 
G2_C819`2<class BaseClass0,class BaseClass0>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string IBase0::Method0() ldstr "G2_C819::Method0.12544()" ldstr "IBase0 on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method1() ldstr "G2_C819::Method1.MI.12546()" ldstr "IBase0 on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method2<object>() ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "IBase0 on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method3<object>() ldstr "G2_C819::Method3.MI.12549<System.Object>()" ldstr "IBase0 on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G2_C819`2<class BaseClass0,class BaseClass1>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class 
BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C819`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass1>::ClassMethod3020() ldstr "G2_C819::ClassMethod3020.12552()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass1>::ClassMethod3019() ldstr "G2_C819::ClassMethod3019.12551()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method3<object>() ldstr "G2_C819::Method3.12548<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method2<object>() ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method1() ldstr "G2_C819::Method1.12545()" ldstr 
"class G2_C819`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method0() ldstr "G2_C819::Method0.12544()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string IBase0::Method0() ldstr "G2_C819::Method0.12544()" ldstr "IBase0 on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method1() ldstr "G2_C819::Method1.MI.12546()" ldstr "IBase0 on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method2<object>() ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "IBase0 on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method3<object>() ldstr "G2_C819::Method3.MI.12549<System.Object>()" ldstr "IBase0 on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G2_C819`2<class BaseClass1,class BaseClass0>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C15`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C819`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass0>::ClassMethod3020() ldstr "G2_C819::ClassMethod3020.12552()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass0>::ClassMethod3019() ldstr "G2_C819::ClassMethod3019.12551()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass0> on type class G2_C819`2<class BaseClass1,class 
BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method3<object>() ldstr "G2_C819::Method3.12548<System.Object>()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method2<object>() ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method1() ldstr "G2_C819::Method1.12545()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method0() ldstr "G2_C819::Method0.12544()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string IBase0::Method0() ldstr "G2_C819::Method0.12544()" ldstr "IBase0 on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method1() ldstr "G2_C819::Method1.MI.12546()" ldstr "IBase0 on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method2<object>() ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "IBase0 on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup 
callvirt instance string IBase0::Method3<object>() ldstr "G2_C819::Method3.MI.12549<System.Object>()" ldstr "IBase0 on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G2_C819`2<class BaseClass1,class BaseClass1>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C819`2<class 
BaseClass1,class BaseClass1> callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass1>::ClassMethod3020() ldstr "G2_C819::ClassMethod3020.12552()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass1>::ClassMethod3019() ldstr "G2_C819::ClassMethod3019.12551()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method3<object>() ldstr "G2_C819::Method3.12548<System.Object>()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method2<object>() ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method1() ldstr "G2_C819::Method1.12545()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method0() ldstr "G2_C819::Method0.12544()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class 
BaseClass1,class BaseClass1> callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string IBase0::Method0() ldstr "G2_C819::Method0.12544()" ldstr "IBase0 on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method1() ldstr "G2_C819::Method1.MI.12546()" ldstr "IBase0 on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method2<object>() ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "IBase0 on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method3<object>() ldstr "G2_C819::Method3.MI.12549<System.Object>()" ldstr "IBase0 on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static void ConstrainedCallsTest() cil managed { .maxstack 10 .locals init (object V_0) ldstr "========================== Constrained Calls Test ==========================" call void [mscorlib]System.Console::WriteLine(string) newobj instance void class G3_C1895`1<class BaseClass0>::.ctor() stloc.0 ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1895::Method7.18589<System.Object>()#" call void Generated1423::M.G1_C15.T.T<class BaseClass0,class BaseClass0,class G3_C1895`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1895::Method7.18589<System.Object>()#" call void Generated1423::M.G1_C15.A.T<class BaseClass0,class G3_C1895`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1895::Method7.18589<System.Object>()#" call void Generated1423::M.G1_C15.A.A<class G3_C1895`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G3_C1895::Method7.MI.18590<System.Object>()#" call void Generated1423::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G3_C1895`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G3_C1895::Method7.MI.18590<System.Object>()#" call void Generated1423::M.IBase2.A.T<class BaseClass0,class G3_C1895`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G3_C1895::Method7.MI.18590<System.Object>()#" call void Generated1423::M.IBase2.A.A<class G3_C1895`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1423::M.IBase1.T<class BaseClass0,class G3_C1895`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1423::M.IBase1.A<class G3_C1895`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G3_C1895::Method7.MI.18590<System.Object>()#" call void Generated1423::M.IBase2.T.T<class 
BaseClass0,class BaseClass1,class G3_C1895`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G3_C1895::Method7.MI.18590<System.Object>()#" call void Generated1423::M.IBase2.A.T<class BaseClass1,class G3_C1895`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G3_C1895::Method7.MI.18590<System.Object>()#" call void Generated1423::M.IBase2.A.B<class G3_C1895`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G3_C1895::ClassMethod3019.MI.18595()#G3_C1895::ClassMethod3020.MI.18596()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1895::Method7.18589<System.Object>()#" call void Generated1423::M.G2_C819.T.T<class BaseClass0,class BaseClass0,class G3_C1895`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G3_C1895::ClassMethod3019.MI.18595()#G3_C1895::ClassMethod3020.MI.18596()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1895::Method7.18589<System.Object>()#" call void Generated1423::M.G2_C819.A.T<class BaseClass0,class G3_C1895`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G3_C1895::ClassMethod3019.MI.18595()#G3_C1895::ClassMethod3020.MI.18596()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1895::Method7.18589<System.Object>()#" call void Generated1423::M.G2_C819.A.A<class G3_C1895`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C819::Method0.12544()#G2_C819::Method1.MI.12546()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.MI.12549<System.Object>()#" call void Generated1423::M.IBase0<class G3_C1895`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G3_C1895::ClassMethod3019.MI.18595()#G3_C1895::ClassMethod3020.MI.18596()#G3_C1895::ClassMethod5187.18591()#G3_C1895::ClassMethod5188.18592()#G3_C1895::ClassMethod5189.18593<System.Object>()#G3_C1895::ClassMethod5190.18594<System.Object>()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1895::Method7.18589<System.Object>()#" call void Generated1423::M.G3_C1895.T<class BaseClass0,class G3_C1895`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G3_C1895::ClassMethod3019.MI.18595()#G3_C1895::ClassMethod3020.MI.18596()#G3_C1895::ClassMethod5187.18591()#G3_C1895::ClassMethod5188.18592()#G3_C1895::ClassMethod5189.18593<System.Object>()#G3_C1895::ClassMethod5190.18594<System.Object>()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1895::Method7.18589<System.Object>()#" call void Generated1423::M.G3_C1895.A<class G3_C1895`1<class BaseClass0>>(!!0,string) newobj instance void class G3_C1895`1<class BaseClass1>::.ctor() stloc.0 ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1895::Method7.18589<System.Object>()#" call void Generated1423::M.G1_C15.T.T<class BaseClass0,class BaseClass0,class G3_C1895`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr 
"G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1895::Method7.18589<System.Object>()#" call void Generated1423::M.G1_C15.A.T<class BaseClass0,class G3_C1895`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1895::Method7.18589<System.Object>()#" call void Generated1423::M.G1_C15.A.A<class G3_C1895`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G3_C1895::Method7.18589<System.Object>()#" call void Generated1423::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G3_C1895`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G3_C1895::Method7.18589<System.Object>()#" call void Generated1423::M.IBase2.A.T<class BaseClass0,class G3_C1895`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G3_C1895::Method7.18589<System.Object>()#" call void Generated1423::M.IBase2.A.A<class G3_C1895`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1423::M.IBase1.T<class BaseClass0,class G3_C1895`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1423::M.IBase1.A<class G3_C1895`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G3_C1895::Method7.MI.18590<System.Object>()#" call void Generated1423::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G3_C1895`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G3_C1895::Method7.MI.18590<System.Object>()#" call void Generated1423::M.IBase2.A.T<class BaseClass1,class G3_C1895`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G3_C1895::Method7.MI.18590<System.Object>()#" call void Generated1423::M.IBase2.A.B<class G3_C1895`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G3_C1895::ClassMethod3019.MI.18595()#G3_C1895::ClassMethod3020.MI.18596()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1895::Method7.18589<System.Object>()#" call void Generated1423::M.G2_C819.T.T<class BaseClass0,class BaseClass0,class G3_C1895`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G3_C1895::ClassMethod3019.MI.18595()#G3_C1895::ClassMethod3020.MI.18596()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1895::Method7.18589<System.Object>()#" call void Generated1423::M.G2_C819.A.T<class BaseClass0,class G3_C1895`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G3_C1895::ClassMethod3019.MI.18595()#G3_C1895::ClassMethod3020.MI.18596()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1895::Method7.18589<System.Object>()#" call void Generated1423::M.G2_C819.A.A<class G3_C1895`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C819::Method0.12544()#G2_C819::Method1.MI.12546()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.MI.12549<System.Object>()#" call void Generated1423::M.IBase0<class G3_C1895`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr 
"G3_C1895::ClassMethod3019.MI.18595()#G3_C1895::ClassMethod3020.MI.18596()#G3_C1895::ClassMethod5187.18591()#G3_C1895::ClassMethod5188.18592()#G3_C1895::ClassMethod5189.18593<System.Object>()#G3_C1895::ClassMethod5190.18594<System.Object>()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1895::Method7.18589<System.Object>()#" call void Generated1423::M.G3_C1895.T<class BaseClass1,class G3_C1895`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G3_C1895::ClassMethod3019.MI.18595()#G3_C1895::ClassMethod3020.MI.18596()#G3_C1895::ClassMethod5187.18591()#G3_C1895::ClassMethod5188.18592()#G3_C1895::ClassMethod5189.18593<System.Object>()#G3_C1895::ClassMethod5190.18594<System.Object>()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1895::Method7.18589<System.Object>()#" call void Generated1423::M.G3_C1895.B<class G3_C1895`1<class BaseClass1>>(!!0,string) newobj instance void class G2_C819`2<class BaseClass0,class BaseClass0>::.ctor() stloc.0 ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G1_C15.T.T<class BaseClass0,class BaseClass0,class G2_C819`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G1_C15.A.T<class BaseClass0,class G2_C819`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G1_C15.A.A<class G2_C819`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G2_C819`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.A.T<class BaseClass0,class G2_C819`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.A.A<class G2_C819`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1423::M.IBase1.T<class BaseClass0,class G2_C819`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1423::M.IBase1.A<class G2_C819`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C819`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.A.T<class BaseClass1,class G2_C819`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.A.B<class G2_C819`2<class BaseClass0,class 
BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C819::ClassMethod3019.12551()#G2_C819::ClassMethod3020.12552()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G2_C819.T.T<class BaseClass0,class BaseClass0,class G2_C819`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C819::ClassMethod3019.12551()#G2_C819::ClassMethod3020.12552()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G2_C819.A.T<class BaseClass0,class G2_C819`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C819::ClassMethod3019.12551()#G2_C819::ClassMethod3020.12552()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G2_C819.A.A<class G2_C819`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C819::Method0.12544()#G2_C819::Method1.MI.12546()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.MI.12549<System.Object>()#" call void Generated1423::M.IBase0<class G2_C819`2<class BaseClass0,class BaseClass0>>(!!0,string) newobj instance void class G2_C819`2<class BaseClass0,class BaseClass1>::.ctor() stloc.0 ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G1_C15.T.T<class BaseClass0,class BaseClass1,class G2_C819`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G1_C15.A.T<class BaseClass1,class G2_C819`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G1_C15.A.B<class G2_C819`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C819`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.B.T<class BaseClass1,class G2_C819`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.B.B<class G2_C819`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1423::M.IBase1.T<class BaseClass0,class G2_C819`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1423::M.IBase1.A<class G2_C819`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void 
Generated1423::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C819`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.A.T<class BaseClass1,class G2_C819`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.A.B<class G2_C819`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C819::ClassMethod3019.12551()#G2_C819::ClassMethod3020.12552()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G2_C819.T.T<class BaseClass0,class BaseClass1,class G2_C819`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C819::ClassMethod3019.12551()#G2_C819::ClassMethod3020.12552()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G2_C819.A.T<class BaseClass1,class G2_C819`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C819::ClassMethod3019.12551()#G2_C819::ClassMethod3020.12552()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G2_C819.A.B<class G2_C819`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C819::Method0.12544()#G2_C819::Method1.MI.12546()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.MI.12549<System.Object>()#" call void Generated1423::M.IBase0<class G2_C819`2<class BaseClass0,class BaseClass1>>(!!0,string) newobj instance void class G2_C819`2<class BaseClass1,class BaseClass0>::.ctor() stloc.0 ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G1_C15.T.T<class BaseClass0,class BaseClass0,class G2_C819`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G1_C15.A.T<class BaseClass0,class G2_C819`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G1_C15.A.A<class G2_C819`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G2_C819`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.A.T<class BaseClass0,class G2_C819`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.A.A<class G2_C819`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr 
"G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1423::M.IBase1.T<class BaseClass0,class G2_C819`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1423::M.IBase1.A<class G2_C819`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C819`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.A.T<class BaseClass1,class G2_C819`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.A.B<class G2_C819`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C819::ClassMethod3019.12551()#G2_C819::ClassMethod3020.12552()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G2_C819.T.T<class BaseClass1,class BaseClass0,class G2_C819`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C819::ClassMethod3019.12551()#G2_C819::ClassMethod3020.12552()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G2_C819.B.T<class BaseClass0,class G2_C819`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C819::ClassMethod3019.12551()#G2_C819::ClassMethod3020.12552()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G2_C819.B.A<class G2_C819`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C819::Method0.12544()#G2_C819::Method1.MI.12546()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.MI.12549<System.Object>()#" call void Generated1423::M.IBase0<class G2_C819`2<class BaseClass1,class BaseClass0>>(!!0,string) newobj instance void class G2_C819`2<class BaseClass1,class BaseClass1>::.ctor() stloc.0 ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G1_C15.T.T<class BaseClass0,class BaseClass1,class G2_C819`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G1_C15.A.T<class BaseClass1,class G2_C819`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G1_C15.A.B<class G2_C819`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.T.T<class 
BaseClass1,class BaseClass1,class G2_C819`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.B.T<class BaseClass1,class G2_C819`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.B.B<class G2_C819`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1423::M.IBase1.T<class BaseClass0,class G2_C819`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1423::M.IBase1.A<class G2_C819`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C819`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.A.T<class BaseClass1,class G2_C819`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.A.B<class G2_C819`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C819::ClassMethod3019.12551()#G2_C819::ClassMethod3020.12552()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G2_C819.T.T<class BaseClass1,class BaseClass1,class G2_C819`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C819::ClassMethod3019.12551()#G2_C819::ClassMethod3020.12552()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G2_C819.B.T<class BaseClass1,class G2_C819`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C819::ClassMethod3019.12551()#G2_C819::ClassMethod3020.12552()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G2_C819.B.B<class G2_C819`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C819::Method0.12544()#G2_C819::Method1.MI.12546()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.MI.12549<System.Object>()#" call void Generated1423::M.IBase0<class G2_C819`2<class BaseClass1,class BaseClass1>>(!!0,string) ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static void StructConstrainedInterfaceCallsTest() cil managed { .maxstack 10 ldstr "===================== Struct Constrained Interface Calls Test =====================" call void [mscorlib]System.Console::WriteLine(string) ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } 
.method public hidebysig static void CalliTest() cil managed { .maxstack 10 .locals init (object V_0) ldstr "========================== Method Calli Test ==========================" call void [mscorlib]System.Console::WriteLine(string) newobj instance void class G3_C1895`1<class BaseClass0>::.ctor() stloc.0 ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method6<object>() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method5() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method4() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G3_C1895::Method7.18589<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G3_C1895::Method7.MI.18590<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string 
class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G3_C1895::Method7.MI.18590<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::ClassMethod3020() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G3_C1895::ClassMethod3020.MI.18596()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::ClassMethod3019() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G3_C1895::ClassMethod3019.MI.18595()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G3_C1895::Method7.18589<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method3<object>() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G2_C819::Method3.12548<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method2<object>() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method1() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G2_C819::Method1.12545()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method0() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G2_C819::Method0.12544()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class 
BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method6<object>() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method5() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G1_C15::Method5.4889()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method4() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G1_C15::Method4.4887()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method0() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G2_C819::Method0.12544()" ldstr "IBase0 on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method1() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G2_C819::Method1.MI.12546()" ldstr "IBase0 on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method2<object>() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "IBase0 on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method3<object>() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G2_C819::Method3.MI.12549<System.Object>()" ldstr "IBase0 on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass0>::ClassMethod5190<object>() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G3_C1895::ClassMethod5190.18594<System.Object>()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass0>::ClassMethod5189<object>() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G3_C1895::ClassMethod5189.18593<System.Object>()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass0>::ClassMethod5188() calli default string(class G3_C1895`1<class BaseClass0>) ldstr 
"G3_C1895::ClassMethod5188.18592()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass0>::ClassMethod5187() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G3_C1895::ClassMethod5187.18591()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass0>::Method7<object>() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G3_C1895::Method7.18589<System.Object>()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass0>::ClassMethod3020() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G3_C1895::ClassMethod3020.MI.18596()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass0>::ClassMethod3019() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G3_C1895::ClassMethod3019.MI.18595()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass0>::Method3<object>() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G2_C819::Method3.12548<System.Object>()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass0>::Method2<object>() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass0>::Method1() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G2_C819::Method1.12545()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass0>::Method0() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G2_C819::Method0.12544()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass0>::Method6<object>() 
calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass0>::Method5() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G1_C15::Method5.4889()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass0>::Method4() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G1_C15::Method4.4887()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G3_C1895`1<class BaseClass1>::.ctor() stloc.0 ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method6<object>() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method5() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method4() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G3_C1895::Method7.18589<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G3_C1895::Method7.18589<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G3_C1895::Method7.MI.18590<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::ClassMethod3020() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G3_C1895::ClassMethod3020.MI.18596()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::ClassMethod3019() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G3_C1895::ClassMethod3019.MI.18595()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G3_C1895::Method7.18589<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method3<object>() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G2_C819::Method3.12548<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method2<object>() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class 
G2_C819`2<class BaseClass0,class BaseClass0>::Method1() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G2_C819::Method1.12545()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method0() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G2_C819::Method0.12544()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method6<object>() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method5() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G1_C15::Method5.4889()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method4() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G1_C15::Method4.4887()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method0() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G2_C819::Method0.12544()" ldstr "IBase0 on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method1() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G2_C819::Method1.MI.12546()" ldstr "IBase0 on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method2<object>() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "IBase0 on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method3<object>() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G2_C819::Method3.MI.12549<System.Object>()" ldstr "IBase0 on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass1>::ClassMethod5190<object>() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G3_C1895::ClassMethod5190.18594<System.Object>()" ldstr "class 
G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass1>::ClassMethod5189<object>() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G3_C1895::ClassMethod5189.18593<System.Object>()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass1>::ClassMethod5188() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G3_C1895::ClassMethod5188.18592()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass1>::ClassMethod5187() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G3_C1895::ClassMethod5187.18591()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass1>::Method7<object>() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G3_C1895::Method7.18589<System.Object>()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass1>::ClassMethod3020() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G3_C1895::ClassMethod3020.MI.18596()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass1>::ClassMethod3019() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G3_C1895::ClassMethod3019.MI.18595()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass1>::Method3<object>() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G2_C819::Method3.12548<System.Object>()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass1>::Method2<object>() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass1>::Method1() 
calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G2_C819::Method1.12545()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass1>::Method0() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G2_C819::Method0.12544()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass1>::Method6<object>() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass1>::Method5() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G1_C15::Method5.4889()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass1>::Method4() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G1_C15::Method4.4887()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G2_C819`2<class BaseClass0,class BaseClass0>::.ctor() stloc.0 ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method6<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method5() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method4() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) 
ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::ClassMethod3020() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G2_C819::ClassMethod3020.12552()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::ClassMethod3019() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G2_C819::ClassMethod3019.12551()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type 
class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method3<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G2_C819::Method3.12548<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method2<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method1() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G2_C819::Method1.12545()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method0() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G2_C819::Method0.12544()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method6<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method5() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method5.4889()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method4() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method4.4887()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method0() calli default string(class 
G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G2_C819::Method0.12544()" ldstr "IBase0 on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method1() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G2_C819::Method1.MI.12546()" ldstr "IBase0 on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method2<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "IBase0 on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method3<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G2_C819::Method3.MI.12549<System.Object>()" ldstr "IBase0 on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G2_C819`2<class BaseClass0,class BaseClass1>::.ctor() stloc.0 ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method6<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method5() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method4() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass0,class 
BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass1>::ClassMethod3020() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G2_C819::ClassMethod3020.12552()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass1>::ClassMethod3019() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G2_C819::ClassMethod3019.12551()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method3<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G2_C819::Method3.12548<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method2<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method1() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G2_C819::Method1.12545()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method0() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G2_C819::Method0.12544()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method6<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method5() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method5.4889()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method4() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method4.4887()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method0() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G2_C819::Method0.12544()" ldstr "IBase0 on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method1() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G2_C819::Method1.MI.12546()" ldstr "IBase0 on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 
ldloc.0 ldvirtftn instance string IBase0::Method2<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "IBase0 on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method3<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G2_C819::Method3.MI.12549<System.Object>()" ldstr "IBase0 on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G2_C819`2<class BaseClass1,class BaseClass0>::.ctor() stloc.0 ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method6<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method5() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method4() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default 
string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass1,class BaseClass0>::ClassMethod3020() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G2_C819::ClassMethod3020.12552()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass1,class BaseClass0>::ClassMethod3019() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G2_C819::ClassMethod3019.12551()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method3<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G2_C819::Method3.12548<System.Object>()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method2<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class 
G2_C819`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method1() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G2_C819::Method1.12545()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method0() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G2_C819::Method0.12544()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method6<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method5() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method5.4889()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method4() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method4.4887()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method0() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G2_C819::Method0.12544()" ldstr "IBase0 on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method1() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G2_C819::Method1.MI.12546()" ldstr "IBase0 on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method2<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "IBase0 on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method3<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G2_C819::Method3.MI.12549<System.Object>()" ldstr "IBase0 on type class G2_C819`2<class BaseClass1,class 
BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G2_C819`2<class BaseClass1,class BaseClass1>::.ctor() stloc.0 ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method6<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method5() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method4() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass1>" 
call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass1,class BaseClass1>::ClassMethod3020() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G2_C819::ClassMethod3020.12552()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass1,class BaseClass1>::ClassMethod3019() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G2_C819::ClassMethod3019.12551()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method3<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G2_C819::Method3.12548<System.Object>()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method2<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method1() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G2_C819::Method1.12545()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass1,class 
BaseClass1>::Method0() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G2_C819::Method0.12544()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method6<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method5() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method5.4889()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method4() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method4.4887()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method0() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G2_C819::Method0.12544()" ldstr "IBase0 on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method1() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G2_C819::Method1.MI.12546()" ldstr "IBase0 on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method2<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "IBase0 on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method3<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G2_C819::Method3.MI.12549<System.Object>()" ldstr "IBase0 on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static int32 Main() cil managed { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint .maxstack 10 call void Generated1423::MethodCallingTest() call void Generated1423::ConstrainedCallsTest() call void Generated1423::StructConstrainedInterfaceCallsTest() call void 
Generated1423::CalliTest()
      ldc.i4 100
      ret
   }
}
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern mscorlib { .publickeytoken = (B7 7A 5C 56 19 34 E0 89 ) .ver 4:0:0:0 } .assembly extern TestFramework { .publickeytoken = ( B0 3F 5F 7F 11 D5 0A 3A ) } //TYPES IN FORWARDER ASSEMBLIES: //TEST ASSEMBLY: .assembly Generated1423 { .hash algorithm 0x00008004 } .assembly extern xunit.core {} .class public BaseClass0 { .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void [mscorlib]System.Object::.ctor() ret } } .class public BaseClass1 extends BaseClass0 { .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void BaseClass0::.ctor() ret } } .class public G3_C1895`1<T0> extends class G2_C819`2<class BaseClass0,class BaseClass0> implements class IBase2`2<class BaseClass0,!T0> { .method public hidebysig virtual instance string Method7<M0>() cil managed noinlining { ldstr "G3_C1895::Method7.18589<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string 'IBase2<class BaseClass0,T0>.Method7'<M0>() cil managed noinlining { .override method instance string class IBase2`2<class BaseClass0,!T0>::Method7<[1]>() ldstr "G3_C1895::Method7.MI.18590<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string ClassMethod5187() cil managed noinlining { ldstr "G3_C1895::ClassMethod5187.18591()" ret } .method public hidebysig newslot virtual instance string ClassMethod5188() cil managed noinlining { ldstr "G3_C1895::ClassMethod5188.18592()" ret } .method public hidebysig newslot virtual instance string ClassMethod5189<M0>() cil managed noinlining { ldstr "G3_C1895::ClassMethod5189.18593<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string ClassMethod5190<M0>() cil managed noinlining { ldstr "G3_C1895::ClassMethod5190.18594<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string 'G2_C819<class BaseClass0,class BaseClass0>.ClassMethod3019'() cil managed noinlining { .override method instance string class G2_C819`2<class BaseClass0,class BaseClass0>::ClassMethod3019() ldstr "G3_C1895::ClassMethod3019.MI.18595()" ret } .method public hidebysig newslot virtual instance string 'G2_C819<class BaseClass0,class BaseClass0>.ClassMethod3020'() cil managed noinlining { .override method instance string class G2_C819`2<class BaseClass0,class BaseClass0>::ClassMethod3020() ldstr 
"G3_C1895::ClassMethod3020.MI.18596()" ret } .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void class G2_C819`2<class BaseClass0,class BaseClass0>::.ctor() ret } } .class public G2_C819`2<T0, T1> extends class G1_C15`2<class BaseClass0,!T1> implements IBase0, class IBase2`2<!T1,!T1> { .method public hidebysig newslot virtual instance string Method0() cil managed noinlining { ldstr "G2_C819::Method0.12544()" ret } .method public hidebysig newslot virtual instance string Method1() cil managed noinlining { ldstr "G2_C819::Method1.12545()" ret } .method public hidebysig newslot virtual instance string 'IBase0.Method1'() cil managed noinlining { .override method instance string IBase0::Method1() ldstr "G2_C819::Method1.MI.12546()" ret } .method public hidebysig virtual instance string Method2<M0>() cil managed noinlining { ldstr "G2_C819::Method2.12547<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig virtual instance string Method3<M0>() cil managed noinlining { ldstr "G2_C819::Method3.12548<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string 'IBase0.Method3'<M0>() cil managed noinlining { .override method instance string IBase0::Method3<[1]>() ldstr "G2_C819::Method3.MI.12549<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig virtual instance string Method7<M0>() cil managed noinlining { ldstr "G2_C819::Method7.12550<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string ClassMethod3019() cil managed noinlining { ldstr "G2_C819::ClassMethod3019.12551()" ret } .method public hidebysig newslot virtual instance string ClassMethod3020() cil managed noinlining { ldstr "G2_C819::ClassMethod3020.12552()" ret } .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void class G1_C15`2<class BaseClass0,!T1>::.ctor() ret } } .class interface public abstract IBase2`2<+T0, -T1> { .method public hidebysig newslot abstract virtual instance string Method7<M0>() cil managed { } } .class public abstract G1_C15`2<T0, T1> implements class IBase2`2<!T1,!T1>, class IBase1`1<class BaseClass0> { .method public hidebysig newslot virtual instance string Method7<M0>() cil managed noinlining { ldstr "G1_C15::Method7.4885<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret 
} .method public hidebysig newslot virtual instance string 'IBase2<T1,T1>.Method7'<M0>() cil managed noinlining { .override method instance string class IBase2`2<!T1,!T1>::Method7<[1]>() ldstr "G1_C15::Method7.MI.4886<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig virtual instance string Method4() cil managed noinlining { ldstr "G1_C15::Method4.4887()" ret } .method public hidebysig newslot virtual instance string 'IBase1<class BaseClass0>.Method4'() cil managed noinlining { .override method instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G1_C15::Method4.MI.4888()" ret } .method public hidebysig virtual instance string Method5() cil managed noinlining { ldstr "G1_C15::Method5.4889()" ret } .method public hidebysig newslot virtual instance string 'IBase1<class BaseClass0>.Method5'() cil managed noinlining { .override method instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G1_C15::Method5.MI.4890()" ret } .method public hidebysig virtual instance string Method6<M0>() cil managed noinlining { ldstr "G1_C15::Method6.4891<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string 'IBase1<class BaseClass0>.Method6'<M0>() cil managed noinlining { .override method instance string class IBase1`1<class BaseClass0>::Method6<[1]>() ldstr "G1_C15::Method6.MI.4892<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void [mscorlib]System.Object::.ctor() ret } } .class interface public abstract IBase0 { .method public hidebysig newslot abstract virtual instance string Method0() cil managed { } .method public hidebysig newslot abstract virtual instance string Method1() cil managed { } .method public hidebysig newslot abstract virtual instance string Method2<M0>() cil managed { } .method public hidebysig newslot abstract virtual instance string Method3<M0>() cil managed { } } .class interface public abstract IBase1`1<+T0> { .method public hidebysig newslot abstract virtual instance string Method4() cil managed { } .method public hidebysig newslot abstract virtual instance string Method5() cil managed { } .method public hidebysig newslot abstract virtual instance string Method6<M0>() cil managed { } } .class public auto ansi beforefieldinit Generated1423 { .method static void M.BaseClass0<(BaseClass0)W>(!!W inst, string exp) cil managed { .maxstack 5 .locals init (string[] actualResults) ldc.i4.s 0 newarr string stloc.s actualResults ldarg.1 ldstr "M.BaseClass0<(BaseClass0)W>(!!W inst, string exp)" ldc.i4.s 0 ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.BaseClass1<(BaseClass1)W>(!!W inst, string exp) cil managed { .maxstack 5 .locals init (string[] 
actualResults) ldc.i4.s 0 newarr string stloc.s actualResults ldarg.1 ldstr "M.BaseClass1<(BaseClass1)W>(!!W inst, string exp)" ldc.i4.s 0 ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G3_C1895.T<T0,(class G3_C1895`1<!!T0>)W>(!!W 'inst', string exp) cil managed { .maxstack 19 .locals init (string[] actualResults) ldc.i4.s 14 newarr string stloc.s actualResults ldarg.1 ldstr "M.G3_C1895.T<T0,(class G3_C1895`1<!!T0>)W>(!!W 'inst', string exp)" ldc.i4.s 14 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<!!T0>::ClassMethod3019() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<!!T0>::ClassMethod3020() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<!!T0>::ClassMethod5187() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<!!T0>::ClassMethod5188() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<!!T0>::ClassMethod5189<object>() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<!!T0>::ClassMethod5190<object>() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<!!T0>::Method0() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<!!T0>::Method1() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<!!T0>::Method2<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<!!T0>::Method3<object>() stelem.ref ldloc.s actualResults ldc.i4.s 10 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<!!T0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 11 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<!!T0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 12 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<!!T0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 13 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<!!T0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G3_C1895.A<(class G3_C1895`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 19 .locals init (string[] actualResults) ldc.i4.s 14 newarr string stloc.s actualResults ldarg.1 ldstr "M.G3_C1895.A<(class G3_C1895`1<class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 14 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass0>::ClassMethod3019() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass0>::ClassMethod3020() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass0>::ClassMethod5187() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. 
!!W callvirt instance string class G3_C1895`1<class BaseClass0>::ClassMethod5188() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass0>::ClassMethod5189<object>() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass0>::ClassMethod5190<object>() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass0>::Method0() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass0>::Method1() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass0>::Method2<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass0>::Method3<object>() stelem.ref ldloc.s actualResults ldc.i4.s 10 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 11 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 12 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 13 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G3_C1895.B<(class G3_C1895`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 19 .locals init (string[] actualResults) ldc.i4.s 14 newarr string stloc.s actualResults ldarg.1 ldstr "M.G3_C1895.B<(class G3_C1895`1<class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 14 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass1>::ClassMethod3019() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass1>::ClassMethod3020() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass1>::ClassMethod5187() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass1>::ClassMethod5188() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass1>::ClassMethod5189<object>() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass1>::ClassMethod5190<object>() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass1>::Method0() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass1>::Method1() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass1>::Method2<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. 
!!W callvirt instance string class G3_C1895`1<class BaseClass1>::Method3<object>() stelem.ref ldloc.s actualResults ldc.i4.s 10 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 11 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 12 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 13 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1895`1<class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C819.T.T<T0,T1,(class G2_C819`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 15 .locals init (string[] actualResults) ldc.i4.s 10 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C819.T.T<T0,T1,(class G2_C819`2<!!T0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 10 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<!!T0,!!T1>::ClassMethod3019() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<!!T0,!!T1>::ClassMethod3020() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<!!T0,!!T1>::Method0() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<!!T0,!!T1>::Method1() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<!!T0,!!T1>::Method2<object>() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<!!T0,!!T1>::Method3<object>() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<!!T0,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<!!T0,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<!!T0,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<!!T0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C819.A.T<T1,(class G2_C819`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 15 .locals init (string[] actualResults) ldc.i4.s 10 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C819.A.T<T1,(class G2_C819`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 10 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,!!T1>::ClassMethod3019() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,!!T1>::ClassMethod3020() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,!!T1>::Method0() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. 
!!W callvirt instance string class G2_C819`2<class BaseClass0,!!T1>::Method1() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,!!T1>::Method2<object>() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,!!T1>::Method3<object>() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C819.A.A<(class G2_C819`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 15 .locals init (string[] actualResults) ldc.i4.s 10 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C819.A.A<(class G2_C819`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 10 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::ClassMethod3019() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::ClassMethod3020() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method0() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method1() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method2<object>() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method3<object>() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. 
!!W callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C819.A.B<(class G2_C819`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 15 .locals init (string[] actualResults) ldc.i4.s 10 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C819.A.B<(class G2_C819`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 10 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass1>::ClassMethod3019() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass1>::ClassMethod3020() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method0() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method1() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method2<object>() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method3<object>() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C819.B.T<T1,(class G2_C819`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 15 .locals init (string[] actualResults) ldc.i4.s 10 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C819.B.T<T1,(class G2_C819`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 10 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,!!T1>::ClassMethod3019() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,!!T1>::ClassMethod3020() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,!!T1>::Method0() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,!!T1>::Method1() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,!!T1>::Method2<object>() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. 
!!W callvirt instance string class G2_C819`2<class BaseClass1,!!T1>::Method3<object>() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C819.B.A<(class G2_C819`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 15 .locals init (string[] actualResults) ldc.i4.s 10 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C819.B.A<(class G2_C819`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 10 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass0>::ClassMethod3019() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass0>::ClassMethod3020() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method0() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method1() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method2<object>() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method3<object>() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C819.B.B<(class G2_C819`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 15 .locals init (string[] actualResults) ldc.i4.s 10 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C819.B.B<(class G2_C819`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 10 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass1>::ClassMethod3019() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. 
!!W callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass1>::ClassMethod3020() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method0() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method1() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method2<object>() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method3<object>() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<!!T0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. 
!!W callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass1,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C15.T.T<T0,T1,(class G1_C15`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 9 .locals init (string[] actualResults) ldc.i4.s 4 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C15.T.T<T0,T1,(class G1_C15`2<!!T0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 4 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<!!T0,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<!!T0,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<!!T0,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. 
!!W callvirt instance string class G1_C15`2<!!T0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C15.A.T<T1,(class G1_C15`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 9 .locals init (string[] actualResults) ldc.i4.s 4 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C15.A.T<T1,(class G1_C15`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 4 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass0,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass0,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass0,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C15.A.A<(class G1_C15`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 9 .locals init (string[] actualResults) ldc.i4.s 4 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C15.A.A<(class G1_C15`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 4 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C15.A.B<(class G1_C15`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 9 .locals init (string[] actualResults) ldc.i4.s 4 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C15.A.B<(class G1_C15`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 4 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. 
!!W callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C15.B.T<T1,(class G1_C15`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 9 .locals init (string[] actualResults) ldc.i4.s 4 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C15.B.T<T1,(class G1_C15`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 4 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass1,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass1,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass1,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass1,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C15.B.A<(class G1_C15`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 9 .locals init (string[] actualResults) ldc.i4.s 4 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C15.B.A<(class G1_C15`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 4 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C15.B.B<(class G1_C15`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 9 .locals init (string[] actualResults) ldc.i4.s 4 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C15.B.B<(class G1_C15`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 4 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. 
!!W callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase0<(IBase0)W>(!!W inst, string exp) cil managed { .maxstack 9 .locals init (string[] actualResults) ldc.i4.s 4 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase0<(IBase0)W>(!!W inst, string exp)" ldc.i4.s 4 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string IBase0::Method0() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string IBase0::Method1() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string IBase0::Method2<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string IBase0::Method3<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase1.T<T0,(class IBase1`1<!!T0>)W>(!!W 'inst', string exp) cil managed { .maxstack 8 .locals init (string[] actualResults) ldc.i4.s 3 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase1.T<T0,(class IBase1`1<!!T0>)W>(!!W 'inst', string exp)" ldc.i4.s 3 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<!!T0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<!!T0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<!!T0>::Method6<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase1.A<(class IBase1`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 8 .locals init (string[] actualResults) ldc.i4.s 3 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase1.A<(class IBase1`1<class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 3 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase1.B<(class IBase1`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 8 .locals init (string[] actualResults) ldc.i4.s 3 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase1.B<(class IBase1`1<class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 3 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. 
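// MethodCallingTest (defined below) constructs instances such as G3_C1895`1<class BaseClass0>,
// G3_C1895`1<class BaseClass1> and G2_C819`2<class BaseClass0,class BaseClass0>, then calls each
// method through the base-class and interface views (G1_C15`2, G2_C819`2, IBase0, IBase1`1, IBase2`2),
// verifying that every call resolves to the override named by the accompanying ldstr literal.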
!!W callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method public hidebysig static void MethodCallingTest() cil managed { .maxstack 10 .locals init (object V_0) ldstr "========================== Method Calling Test ==========================" call void [mscorlib]System.Console::WriteLine(string) newobj instance void class G3_C1895`1<class BaseClass0>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C15`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G3_C1895::Method7.18589<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G3_C1895::Method7.MI.18590<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G3_C1895::Method7.MI.18590<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup 
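// View the same G3_C1895`1<class BaseClass0> instance through its G2_C819`2<class BaseClass0,class BaseClass0> base:
// ClassMethod3019/ClassMethod3020 and Method7 pick up the G3_C1895 overrides, Method0-Method3 stay
// with the G2_C819 implementations, and Method4-Method6 resolve to G1_C15.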
castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::ClassMethod3020() ldstr "G3_C1895::ClassMethod3020.MI.18596()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::ClassMethod3019() ldstr "G3_C1895::ClassMethod3019.MI.18595()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G3_C1895::Method7.18589<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method3<object>() ldstr "G2_C819::Method3.12548<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method2<object>() ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method1() ldstr "G2_C819::Method1.12545()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method0() ldstr "G2_C819::Method0.12544()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method4() 
ldstr "G1_C15::Method4.4887()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string IBase0::Method0() ldstr "G2_C819::Method0.12544()" ldstr "IBase0 on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method1() ldstr "G2_C819::Method1.MI.12546()" ldstr "IBase0 on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method2<object>() ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "IBase0 on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method3<object>() ldstr "G2_C819::Method3.MI.12549<System.Object>()" ldstr "IBase0 on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G3_C1895`1<class BaseClass0> callvirt instance string class G3_C1895`1<class BaseClass0>::ClassMethod5190<object>() ldstr "G3_C1895::ClassMethod5190.18594<System.Object>()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass0> callvirt instance string class G3_C1895`1<class BaseClass0>::ClassMethod5189<object>() ldstr "G3_C1895::ClassMethod5189.18593<System.Object>()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass0> callvirt instance string class G3_C1895`1<class BaseClass0>::ClassMethod5188() ldstr "G3_C1895::ClassMethod5188.18592()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass0> callvirt instance string class G3_C1895`1<class BaseClass0>::ClassMethod5187() ldstr "G3_C1895::ClassMethod5187.18591()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass0> callvirt instance string class G3_C1895`1<class BaseClass0>::Method7<object>() ldstr "G3_C1895::Method7.18589<System.Object>()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass0> callvirt instance string class G3_C1895`1<class BaseClass0>::ClassMethod3020() ldstr "G3_C1895::ClassMethod3020.MI.18596()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass0> callvirt instance string class G3_C1895`1<class BaseClass0>::ClassMethod3019() ldstr "G3_C1895::ClassMethod3019.MI.18595()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass 
class G3_C1895`1<class BaseClass0> callvirt instance string class G3_C1895`1<class BaseClass0>::Method3<object>() ldstr "G2_C819::Method3.12548<System.Object>()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass0> callvirt instance string class G3_C1895`1<class BaseClass0>::Method2<object>() ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass0> callvirt instance string class G3_C1895`1<class BaseClass0>::Method1() ldstr "G2_C819::Method1.12545()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass0> callvirt instance string class G3_C1895`1<class BaseClass0>::Method0() ldstr "G2_C819::Method0.12544()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass0> callvirt instance string class G3_C1895`1<class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass0> callvirt instance string class G3_C1895`1<class BaseClass0>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass0> callvirt instance string class G3_C1895`1<class BaseClass0>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G3_C1895`1<class BaseClass1>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C15`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass0> callvirt instance string class 
G1_C15`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G3_C1895::Method7.18589<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G3_C1895::Method7.18589<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G3_C1895::Method7.MI.18590<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::ClassMethod3020() ldstr "G3_C1895::ClassMethod3020.MI.18596()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::ClassMethod3019() ldstr "G3_C1895::ClassMethod3019.MI.18595()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G3_C1895::Method7.18589<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method3<object>() ldstr "G2_C819::Method3.12548<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method2<object>() ldstr 
"G2_C819::Method2.12547<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method1() ldstr "G2_C819::Method1.12545()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method0() ldstr "G2_C819::Method0.12544()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string IBase0::Method0() ldstr "G2_C819::Method0.12544()" ldstr "IBase0 on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method1() ldstr "G2_C819::Method1.MI.12546()" ldstr "IBase0 on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method2<object>() ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "IBase0 on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method3<object>() ldstr "G2_C819::Method3.MI.12549<System.Object>()" ldstr "IBase0 on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G3_C1895`1<class BaseClass1> callvirt instance string class G3_C1895`1<class BaseClass1>::ClassMethod5190<object>() ldstr "G3_C1895::ClassMethod5190.18594<System.Object>()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass1> callvirt instance string class G3_C1895`1<class BaseClass1>::ClassMethod5189<object>() ldstr "G3_C1895::ClassMethod5189.18593<System.Object>()" ldstr 
"class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass1> callvirt instance string class G3_C1895`1<class BaseClass1>::ClassMethod5188() ldstr "G3_C1895::ClassMethod5188.18592()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass1> callvirt instance string class G3_C1895`1<class BaseClass1>::ClassMethod5187() ldstr "G3_C1895::ClassMethod5187.18591()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass1> callvirt instance string class G3_C1895`1<class BaseClass1>::Method7<object>() ldstr "G3_C1895::Method7.18589<System.Object>()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass1> callvirt instance string class G3_C1895`1<class BaseClass1>::ClassMethod3020() ldstr "G3_C1895::ClassMethod3020.MI.18596()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass1> callvirt instance string class G3_C1895`1<class BaseClass1>::ClassMethod3019() ldstr "G3_C1895::ClassMethod3019.MI.18595()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass1> callvirt instance string class G3_C1895`1<class BaseClass1>::Method3<object>() ldstr "G2_C819::Method3.12548<System.Object>()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass1> callvirt instance string class G3_C1895`1<class BaseClass1>::Method2<object>() ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass1> callvirt instance string class G3_C1895`1<class BaseClass1>::Method1() ldstr "G2_C819::Method1.12545()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass1> callvirt instance string class G3_C1895`1<class BaseClass1>::Method0() ldstr "G2_C819::Method0.12544()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass1> callvirt instance string class G3_C1895`1<class BaseClass1>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass1> callvirt instance 
string class G3_C1895`1<class BaseClass1>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1895`1<class BaseClass1> callvirt instance string class G3_C1895`1<class BaseClass1>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G2_C819`2<class BaseClass0,class BaseClass0>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C15`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class 
BaseClass1>::Method7<object>() ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::ClassMethod3020() ldstr "G2_C819::ClassMethod3020.12552()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::ClassMethod3019() ldstr "G2_C819::ClassMethod3019.12551()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method3<object>() ldstr "G2_C819::Method3.12548<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method2<object>() ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method1() ldstr "G2_C819::Method1.12545()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method0() ldstr "G2_C819::Method0.12544()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class 
G2_C819`2<class BaseClass0,class BaseClass0>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string IBase0::Method0() ldstr "G2_C819::Method0.12544()" ldstr "IBase0 on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method1() ldstr "G2_C819::Method1.MI.12546()" ldstr "IBase0 on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method2<object>() ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "IBase0 on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method3<object>() ldstr "G2_C819::Method3.MI.12549<System.Object>()" ldstr "IBase0 on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G2_C819`2<class BaseClass0,class BaseClass1>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class 
BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C819`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass1>::ClassMethod3020() ldstr "G2_C819::ClassMethod3020.12552()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass1>::ClassMethod3019() ldstr "G2_C819::ClassMethod3019.12551()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method3<object>() ldstr "G2_C819::Method3.12548<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method2<object>() ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method1() ldstr "G2_C819::Method1.12545()" ldstr 
"class G2_C819`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method0() ldstr "G2_C819::Method0.12544()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string IBase0::Method0() ldstr "G2_C819::Method0.12544()" ldstr "IBase0 on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method1() ldstr "G2_C819::Method1.MI.12546()" ldstr "IBase0 on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method2<object>() ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "IBase0 on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method3<object>() ldstr "G2_C819::Method3.MI.12549<System.Object>()" ldstr "IBase0 on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G2_C819`2<class BaseClass1,class BaseClass0>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C15`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C819`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass0>::ClassMethod3020() ldstr "G2_C819::ClassMethod3020.12552()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass0>::ClassMethod3019() ldstr "G2_C819::ClassMethod3019.12551()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass0> on type class G2_C819`2<class BaseClass1,class 
BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method3<object>() ldstr "G2_C819::Method3.12548<System.Object>()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method2<object>() ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method1() ldstr "G2_C819::Method1.12545()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method0() ldstr "G2_C819::Method0.12544()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string IBase0::Method0() ldstr "G2_C819::Method0.12544()" ldstr "IBase0 on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method1() ldstr "G2_C819::Method1.MI.12546()" ldstr "IBase0 on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method2<object>() ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "IBase0 on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup 
callvirt instance string IBase0::Method3<object>() ldstr "G2_C819::Method3.MI.12549<System.Object>()" ldstr "IBase0 on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G2_C819`2<class BaseClass1,class BaseClass1>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C819`2<class 
BaseClass1,class BaseClass1> callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass1>::ClassMethod3020() ldstr "G2_C819::ClassMethod3020.12552()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass1>::ClassMethod3019() ldstr "G2_C819::ClassMethod3019.12551()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method3<object>() ldstr "G2_C819::Method3.12548<System.Object>()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method2<object>() ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method1() ldstr "G2_C819::Method1.12545()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method0() ldstr "G2_C819::Method0.12544()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C819`2<class 
BaseClass1,class BaseClass1> callvirt instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string IBase0::Method0() ldstr "G2_C819::Method0.12544()" ldstr "IBase0 on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method1() ldstr "G2_C819::Method1.MI.12546()" ldstr "IBase0 on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method2<object>() ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "IBase0 on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string IBase0::Method3<object>() ldstr "G2_C819::Method3.MI.12549<System.Object>()" ldstr "IBase0 on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static void ConstrainedCallsTest() cil managed { .maxstack 10 .locals init (object V_0) ldstr "========================== Constrained Calls Test ==========================" call void [mscorlib]System.Console::WriteLine(string) newobj instance void class G3_C1895`1<class BaseClass0>::.ctor() stloc.0 ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1895::Method7.18589<System.Object>()#" call void Generated1423::M.G1_C15.T.T<class BaseClass0,class BaseClass0,class G3_C1895`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1895::Method7.18589<System.Object>()#" call void Generated1423::M.G1_C15.A.T<class BaseClass0,class G3_C1895`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1895::Method7.18589<System.Object>()#" call void Generated1423::M.G1_C15.A.A<class G3_C1895`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G3_C1895::Method7.MI.18590<System.Object>()#" call void Generated1423::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G3_C1895`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G3_C1895::Method7.MI.18590<System.Object>()#" call void Generated1423::M.IBase2.A.T<class BaseClass0,class G3_C1895`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G3_C1895::Method7.MI.18590<System.Object>()#" call void Generated1423::M.IBase2.A.A<class G3_C1895`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1423::M.IBase1.T<class BaseClass0,class G3_C1895`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1423::M.IBase1.A<class G3_C1895`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G3_C1895::Method7.MI.18590<System.Object>()#" call void Generated1423::M.IBase2.T.T<class 
BaseClass0,class BaseClass1,class G3_C1895`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G3_C1895::Method7.MI.18590<System.Object>()#" call void Generated1423::M.IBase2.A.T<class BaseClass1,class G3_C1895`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G3_C1895::Method7.MI.18590<System.Object>()#" call void Generated1423::M.IBase2.A.B<class G3_C1895`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G3_C1895::ClassMethod3019.MI.18595()#G3_C1895::ClassMethod3020.MI.18596()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1895::Method7.18589<System.Object>()#" call void Generated1423::M.G2_C819.T.T<class BaseClass0,class BaseClass0,class G3_C1895`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G3_C1895::ClassMethod3019.MI.18595()#G3_C1895::ClassMethod3020.MI.18596()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1895::Method7.18589<System.Object>()#" call void Generated1423::M.G2_C819.A.T<class BaseClass0,class G3_C1895`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G3_C1895::ClassMethod3019.MI.18595()#G3_C1895::ClassMethod3020.MI.18596()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1895::Method7.18589<System.Object>()#" call void Generated1423::M.G2_C819.A.A<class G3_C1895`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C819::Method0.12544()#G2_C819::Method1.MI.12546()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.MI.12549<System.Object>()#" call void Generated1423::M.IBase0<class G3_C1895`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G3_C1895::ClassMethod3019.MI.18595()#G3_C1895::ClassMethod3020.MI.18596()#G3_C1895::ClassMethod5187.18591()#G3_C1895::ClassMethod5188.18592()#G3_C1895::ClassMethod5189.18593<System.Object>()#G3_C1895::ClassMethod5190.18594<System.Object>()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1895::Method7.18589<System.Object>()#" call void Generated1423::M.G3_C1895.T<class BaseClass0,class G3_C1895`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G3_C1895::ClassMethod3019.MI.18595()#G3_C1895::ClassMethod3020.MI.18596()#G3_C1895::ClassMethod5187.18591()#G3_C1895::ClassMethod5188.18592()#G3_C1895::ClassMethod5189.18593<System.Object>()#G3_C1895::ClassMethod5190.18594<System.Object>()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1895::Method7.18589<System.Object>()#" call void Generated1423::M.G3_C1895.A<class G3_C1895`1<class BaseClass0>>(!!0,string) newobj instance void class G3_C1895`1<class BaseClass1>::.ctor() stloc.0 ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1895::Method7.18589<System.Object>()#" call void Generated1423::M.G1_C15.T.T<class BaseClass0,class BaseClass0,class G3_C1895`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr 
"G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1895::Method7.18589<System.Object>()#" call void Generated1423::M.G1_C15.A.T<class BaseClass0,class G3_C1895`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1895::Method7.18589<System.Object>()#" call void Generated1423::M.G1_C15.A.A<class G3_C1895`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G3_C1895::Method7.18589<System.Object>()#" call void Generated1423::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G3_C1895`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G3_C1895::Method7.18589<System.Object>()#" call void Generated1423::M.IBase2.A.T<class BaseClass0,class G3_C1895`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G3_C1895::Method7.18589<System.Object>()#" call void Generated1423::M.IBase2.A.A<class G3_C1895`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1423::M.IBase1.T<class BaseClass0,class G3_C1895`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1423::M.IBase1.A<class G3_C1895`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G3_C1895::Method7.MI.18590<System.Object>()#" call void Generated1423::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G3_C1895`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G3_C1895::Method7.MI.18590<System.Object>()#" call void Generated1423::M.IBase2.A.T<class BaseClass1,class G3_C1895`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G3_C1895::Method7.MI.18590<System.Object>()#" call void Generated1423::M.IBase2.A.B<class G3_C1895`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G3_C1895::ClassMethod3019.MI.18595()#G3_C1895::ClassMethod3020.MI.18596()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1895::Method7.18589<System.Object>()#" call void Generated1423::M.G2_C819.T.T<class BaseClass0,class BaseClass0,class G3_C1895`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G3_C1895::ClassMethod3019.MI.18595()#G3_C1895::ClassMethod3020.MI.18596()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1895::Method7.18589<System.Object>()#" call void Generated1423::M.G2_C819.A.T<class BaseClass0,class G3_C1895`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G3_C1895::ClassMethod3019.MI.18595()#G3_C1895::ClassMethod3020.MI.18596()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1895::Method7.18589<System.Object>()#" call void Generated1423::M.G2_C819.A.A<class G3_C1895`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C819::Method0.12544()#G2_C819::Method1.MI.12546()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.MI.12549<System.Object>()#" call void Generated1423::M.IBase0<class G3_C1895`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr 
"G3_C1895::ClassMethod3019.MI.18595()#G3_C1895::ClassMethod3020.MI.18596()#G3_C1895::ClassMethod5187.18591()#G3_C1895::ClassMethod5188.18592()#G3_C1895::ClassMethod5189.18593<System.Object>()#G3_C1895::ClassMethod5190.18594<System.Object>()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1895::Method7.18589<System.Object>()#" call void Generated1423::M.G3_C1895.T<class BaseClass1,class G3_C1895`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G3_C1895::ClassMethod3019.MI.18595()#G3_C1895::ClassMethod3020.MI.18596()#G3_C1895::ClassMethod5187.18591()#G3_C1895::ClassMethod5188.18592()#G3_C1895::ClassMethod5189.18593<System.Object>()#G3_C1895::ClassMethod5190.18594<System.Object>()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1895::Method7.18589<System.Object>()#" call void Generated1423::M.G3_C1895.B<class G3_C1895`1<class BaseClass1>>(!!0,string) newobj instance void class G2_C819`2<class BaseClass0,class BaseClass0>::.ctor() stloc.0 ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G1_C15.T.T<class BaseClass0,class BaseClass0,class G2_C819`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G1_C15.A.T<class BaseClass0,class G2_C819`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G1_C15.A.A<class G2_C819`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G2_C819`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.A.T<class BaseClass0,class G2_C819`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.A.A<class G2_C819`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1423::M.IBase1.T<class BaseClass0,class G2_C819`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1423::M.IBase1.A<class G2_C819`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C819`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.A.T<class BaseClass1,class G2_C819`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.A.B<class G2_C819`2<class BaseClass0,class 
BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C819::ClassMethod3019.12551()#G2_C819::ClassMethod3020.12552()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G2_C819.T.T<class BaseClass0,class BaseClass0,class G2_C819`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C819::ClassMethod3019.12551()#G2_C819::ClassMethod3020.12552()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G2_C819.A.T<class BaseClass0,class G2_C819`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C819::ClassMethod3019.12551()#G2_C819::ClassMethod3020.12552()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G2_C819.A.A<class G2_C819`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C819::Method0.12544()#G2_C819::Method1.MI.12546()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.MI.12549<System.Object>()#" call void Generated1423::M.IBase0<class G2_C819`2<class BaseClass0,class BaseClass0>>(!!0,string) newobj instance void class G2_C819`2<class BaseClass0,class BaseClass1>::.ctor() stloc.0 ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G1_C15.T.T<class BaseClass0,class BaseClass1,class G2_C819`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G1_C15.A.T<class BaseClass1,class G2_C819`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G1_C15.A.B<class G2_C819`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C819`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.B.T<class BaseClass1,class G2_C819`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.B.B<class G2_C819`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1423::M.IBase1.T<class BaseClass0,class G2_C819`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1423::M.IBase1.A<class G2_C819`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void 
Generated1423::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C819`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.A.T<class BaseClass1,class G2_C819`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.A.B<class G2_C819`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C819::ClassMethod3019.12551()#G2_C819::ClassMethod3020.12552()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G2_C819.T.T<class BaseClass0,class BaseClass1,class G2_C819`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C819::ClassMethod3019.12551()#G2_C819::ClassMethod3020.12552()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G2_C819.A.T<class BaseClass1,class G2_C819`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C819::ClassMethod3019.12551()#G2_C819::ClassMethod3020.12552()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G2_C819.A.B<class G2_C819`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C819::Method0.12544()#G2_C819::Method1.MI.12546()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.MI.12549<System.Object>()#" call void Generated1423::M.IBase0<class G2_C819`2<class BaseClass0,class BaseClass1>>(!!0,string) newobj instance void class G2_C819`2<class BaseClass1,class BaseClass0>::.ctor() stloc.0 ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G1_C15.T.T<class BaseClass0,class BaseClass0,class G2_C819`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G1_C15.A.T<class BaseClass0,class G2_C819`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G1_C15.A.A<class G2_C819`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G2_C819`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.A.T<class BaseClass0,class G2_C819`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.A.A<class G2_C819`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr 
"G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1423::M.IBase1.T<class BaseClass0,class G2_C819`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1423::M.IBase1.A<class G2_C819`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C819`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.A.T<class BaseClass1,class G2_C819`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.A.B<class G2_C819`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C819::ClassMethod3019.12551()#G2_C819::ClassMethod3020.12552()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G2_C819.T.T<class BaseClass1,class BaseClass0,class G2_C819`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C819::ClassMethod3019.12551()#G2_C819::ClassMethod3020.12552()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G2_C819.B.T<class BaseClass0,class G2_C819`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C819::ClassMethod3019.12551()#G2_C819::ClassMethod3020.12552()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G2_C819.B.A<class G2_C819`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C819::Method0.12544()#G2_C819::Method1.MI.12546()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.MI.12549<System.Object>()#" call void Generated1423::M.IBase0<class G2_C819`2<class BaseClass1,class BaseClass0>>(!!0,string) newobj instance void class G2_C819`2<class BaseClass1,class BaseClass1>::.ctor() stloc.0 ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G1_C15.T.T<class BaseClass0,class BaseClass1,class G2_C819`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G1_C15.A.T<class BaseClass1,class G2_C819`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G1_C15.A.B<class G2_C819`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.T.T<class 
BaseClass1,class BaseClass1,class G2_C819`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.B.T<class BaseClass1,class G2_C819`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.B.B<class G2_C819`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1423::M.IBase1.T<class BaseClass0,class G2_C819`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1423::M.IBase1.A<class G2_C819`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C819`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.A.T<class BaseClass1,class G2_C819`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.IBase2.A.B<class G2_C819`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C819::ClassMethod3019.12551()#G2_C819::ClassMethod3020.12552()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G2_C819.T.T<class BaseClass1,class BaseClass1,class G2_C819`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C819::ClassMethod3019.12551()#G2_C819::ClassMethod3020.12552()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G2_C819.B.T<class BaseClass1,class G2_C819`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C819::ClassMethod3019.12551()#G2_C819::ClassMethod3020.12552()#G2_C819::Method0.12544()#G2_C819::Method1.12545()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.12548<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C819::Method7.12550<System.Object>()#" call void Generated1423::M.G2_C819.B.B<class G2_C819`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C819::Method0.12544()#G2_C819::Method1.MI.12546()#G2_C819::Method2.12547<System.Object>()#G2_C819::Method3.MI.12549<System.Object>()#" call void Generated1423::M.IBase0<class G2_C819`2<class BaseClass1,class BaseClass1>>(!!0,string) ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static void StructConstrainedInterfaceCallsTest() cil managed { .maxstack 10 ldstr "===================== Struct Constrained Interface Calls Test =====================" call void [mscorlib]System.Console::WriteLine(string) ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } 
.method public hidebysig static void CalliTest() cil managed { .maxstack 10 .locals init (object V_0) ldstr "========================== Method Calli Test ==========================" call void [mscorlib]System.Console::WriteLine(string) newobj instance void class G3_C1895`1<class BaseClass0>::.ctor() stloc.0 ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method6<object>() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method5() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method4() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G3_C1895::Method7.18589<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G3_C1895::Method7.MI.18590<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string 
class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G3_C1895::Method7.MI.18590<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::ClassMethod3020() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G3_C1895::ClassMethod3020.MI.18596()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::ClassMethod3019() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G3_C1895::ClassMethod3019.MI.18595()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G3_C1895::Method7.18589<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method3<object>() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G2_C819::Method3.12548<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method2<object>() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method1() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G2_C819::Method1.12545()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method0() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G2_C819::Method0.12544()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class 
BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method6<object>() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method5() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G1_C15::Method5.4889()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method4() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G1_C15::Method4.4887()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method0() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G2_C819::Method0.12544()" ldstr "IBase0 on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method1() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G2_C819::Method1.MI.12546()" ldstr "IBase0 on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method2<object>() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "IBase0 on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method3<object>() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G2_C819::Method3.MI.12549<System.Object>()" ldstr "IBase0 on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass0>::ClassMethod5190<object>() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G3_C1895::ClassMethod5190.18594<System.Object>()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass0>::ClassMethod5189<object>() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G3_C1895::ClassMethod5189.18593<System.Object>()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass0>::ClassMethod5188() calli default string(class G3_C1895`1<class BaseClass0>) ldstr 
"G3_C1895::ClassMethod5188.18592()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass0>::ClassMethod5187() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G3_C1895::ClassMethod5187.18591()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass0>::Method7<object>() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G3_C1895::Method7.18589<System.Object>()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass0>::ClassMethod3020() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G3_C1895::ClassMethod3020.MI.18596()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass0>::ClassMethod3019() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G3_C1895::ClassMethod3019.MI.18595()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass0>::Method3<object>() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G2_C819::Method3.12548<System.Object>()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass0>::Method2<object>() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass0>::Method1() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G2_C819::Method1.12545()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass0>::Method0() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G2_C819::Method0.12544()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass0>::Method6<object>() 
calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass0>::Method5() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G1_C15::Method5.4889()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass0>::Method4() calli default string(class G3_C1895`1<class BaseClass0>) ldstr "G1_C15::Method4.4887()" ldstr "class G3_C1895`1<class BaseClass0> on type class G3_C1895`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G3_C1895`1<class BaseClass1>::.ctor() stloc.0 ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method6<object>() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method5() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method4() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G3_C1895::Method7.18589<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G3_C1895::Method7.18589<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G3_C1895::Method7.MI.18590<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::ClassMethod3020() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G3_C1895::ClassMethod3020.MI.18596()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::ClassMethod3019() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G3_C1895::ClassMethod3019.MI.18595()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G3_C1895::Method7.18589<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method3<object>() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G2_C819::Method3.12548<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method2<object>() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class 
G2_C819`2<class BaseClass0,class BaseClass0>::Method1() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G2_C819::Method1.12545()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method0() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G2_C819::Method0.12544()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method6<object>() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method5() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G1_C15::Method5.4889()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method4() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G1_C15::Method4.4887()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method0() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G2_C819::Method0.12544()" ldstr "IBase0 on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method1() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G2_C819::Method1.MI.12546()" ldstr "IBase0 on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method2<object>() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "IBase0 on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method3<object>() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G2_C819::Method3.MI.12549<System.Object>()" ldstr "IBase0 on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass1>::ClassMethod5190<object>() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G3_C1895::ClassMethod5190.18594<System.Object>()" ldstr "class 
G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass1>::ClassMethod5189<object>() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G3_C1895::ClassMethod5189.18593<System.Object>()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass1>::ClassMethod5188() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G3_C1895::ClassMethod5188.18592()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass1>::ClassMethod5187() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G3_C1895::ClassMethod5187.18591()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass1>::Method7<object>() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G3_C1895::Method7.18589<System.Object>()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass1>::ClassMethod3020() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G3_C1895::ClassMethod3020.MI.18596()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass1>::ClassMethod3019() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G3_C1895::ClassMethod3019.MI.18595()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass1>::Method3<object>() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G2_C819::Method3.12548<System.Object>()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass1>::Method2<object>() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass1>::Method1() 
calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G2_C819::Method1.12545()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass1>::Method0() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G2_C819::Method0.12544()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass1>::Method6<object>() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass1>::Method5() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G1_C15::Method5.4889()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1895`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1895`1<class BaseClass1>::Method4() calli default string(class G3_C1895`1<class BaseClass1>) ldstr "G1_C15::Method4.4887()" ldstr "class G3_C1895`1<class BaseClass1> on type class G3_C1895`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G2_C819`2<class BaseClass0,class BaseClass0>::.ctor() stloc.0 ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method6<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method5() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method4() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) 
ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::ClassMethod3020() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G2_C819::ClassMethod3020.12552()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::ClassMethod3019() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G2_C819::ClassMethod3019.12551()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type 
class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method3<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G2_C819::Method3.12548<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method2<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method1() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G2_C819::Method1.12545()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method0() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G2_C819::Method0.12544()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method6<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method5() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method5.4889()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass0>::Method4() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method4.4887()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method0() calli default string(class 
G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G2_C819::Method0.12544()" ldstr "IBase0 on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method1() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G2_C819::Method1.MI.12546()" ldstr "IBase0 on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method2<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "IBase0 on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method3<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass0>) ldstr "G2_C819::Method3.MI.12549<System.Object>()" ldstr "IBase0 on type class G2_C819`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G2_C819`2<class BaseClass0,class BaseClass1>::.ctor() stloc.0 ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method6<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method5() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method4() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass0,class 
BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass1>::ClassMethod3020() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G2_C819::ClassMethod3020.12552()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass1>::ClassMethod3019() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G2_C819::ClassMethod3019.12551()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method3<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G2_C819::Method3.12548<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method2<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method1() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G2_C819::Method1.12545()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method0() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G2_C819::Method0.12544()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method6<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method5() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method5.4889()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass0,class BaseClass1>::Method4() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method4.4887()" ldstr "class G2_C819`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method0() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G2_C819::Method0.12544()" ldstr "IBase0 on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method1() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G2_C819::Method1.MI.12546()" ldstr "IBase0 on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 
ldloc.0 ldvirtftn instance string IBase0::Method2<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "IBase0 on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method3<object>() calli default string(class G2_C819`2<class BaseClass0,class BaseClass1>) ldstr "G2_C819::Method3.MI.12549<System.Object>()" ldstr "IBase0 on type class G2_C819`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G2_C819`2<class BaseClass1,class BaseClass0>::.ctor() stloc.0 ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method6<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method5() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method4() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default 
string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass1,class BaseClass0>::ClassMethod3020() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G2_C819::ClassMethod3020.12552()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass1,class BaseClass0>::ClassMethod3019() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G2_C819::ClassMethod3019.12551()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method3<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G2_C819::Method3.12548<System.Object>()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method2<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class 
G2_C819`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method1() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G2_C819::Method1.12545()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method0() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G2_C819::Method0.12544()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method6<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method5() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method5.4889()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass1,class BaseClass0>::Method4() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method4.4887()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method0() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G2_C819::Method0.12544()" ldstr "IBase0 on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method1() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G2_C819::Method1.MI.12546()" ldstr "IBase0 on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method2<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "IBase0 on type class G2_C819`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method3<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass0>) ldstr "G2_C819::Method3.MI.12549<System.Object>()" ldstr "IBase0 on type class G2_C819`2<class BaseClass1,class 
BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G2_C819`2<class BaseClass1,class BaseClass1>::.ctor() stloc.0 ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method6<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method5() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method4() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C819`2<class BaseClass1,class BaseClass1>" 
call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass1,class BaseClass1>::ClassMethod3020() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G2_C819::ClassMethod3020.12552()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass1,class BaseClass1>::ClassMethod3019() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G2_C819::ClassMethod3019.12551()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G2_C819::Method7.12550<System.Object>()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method3<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G2_C819::Method3.12548<System.Object>()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method2<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method1() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G2_C819::Method1.12545()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass1,class 
BaseClass1>::Method0() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G2_C819::Method0.12544()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method6<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method5() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method5.4889()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C819`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C819`2<class BaseClass1,class BaseClass1>::Method4() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method4.4887()" ldstr "class G2_C819`2<class BaseClass1,class BaseClass1> on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method0() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G2_C819::Method0.12544()" ldstr "IBase0 on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method1() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G2_C819::Method1.MI.12546()" ldstr "IBase0 on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method2<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G2_C819::Method2.12547<System.Object>()" ldstr "IBase0 on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string IBase0::Method3<object>() calli default string(class G2_C819`2<class BaseClass1,class BaseClass1>) ldstr "G2_C819::Method3.MI.12549<System.Object>()" ldstr "IBase0 on type class G2_C819`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static int32 Main() cil managed { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint .maxstack 10 call void Generated1423::MethodCallingTest() call void Generated1423::ConstrainedCallsTest() call void Generated1423::StructConstrainedInterfaceCallsTest() call void 
Generated1423::CalliTest() ldc.i4 100 ret } }
./src/tests/JIT/jit64/regress/vsw/files
102964 329169 373472 471729 517867 524070 528315 538615 539509 541067 543229 549880 601425 610178
./src/tests/Loader/classloader/TypeGeneratorTests/TypeGeneratorTest682/Generated682.ilproj
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <ItemGroup> <Compile Include="Generated682.il" /> </ItemGroup> <ItemGroup> <ProjectReference Include="..\TestFramework\TestFramework.csproj" /> </ItemGroup> </Project>
./src/libraries/System.ComponentModel.Primitives/tests/TrimmingTests/VerifyCategoryNamesDontGetTrimmed.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.ComponentModel; namespace Test { /// <summary> /// When UseSystemResourceStrings feature switch is on, we want to validate that getting the resource string of /// a category attribute won't result in having "PropertyCategory" appended to the beginning of the resulting string. /// This test ensures that both built-in categories as well as custom categories get the right Category when the /// feature switch is on. /// </summary> public class Program { public static int Main() { if (GetEnumCategory(AnEnum.Action) == "Action" && GetEnumCategory(AnEnum.Something) == "Something" && GetEnumCategory(AnEnum.WindowStyle) == "Window Style") { return 100; } return -1; } public static string GetEnumCategory<T>(T enumValue) where T : struct, IConvertible { if (!typeof(T).IsEnum) return null; var enumCategory = enumValue.ToString(); var fieldInfo = enumValue.GetType().GetField(enumValue.ToString()); if (fieldInfo != null) { var attrs = fieldInfo.GetCustomAttributes(typeof(CategoryAttribute), false); if (attrs != null && attrs.Length > 0) { enumCategory = ((CategoryAttribute)attrs[0]).Category; } } return enumCategory; } } public enum AnEnum { [Category("Action")] // Built-in category Action = 1, [Category("Something")] // Custom category Something = 2, [Category("WindowStyle")] // Built-in category with localized string different than category name. WindowStyle = 3, } }
./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd/Negate.Vector128.Int32.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void Negate_Vector128_Int32() { var test = new SimpleUnaryOpTest__Negate_Vector128_Int32(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleUnaryOpTest__Negate_Vector128_Int32 { private struct DataTable { private byte[] inArray1; private byte[] outArray; private GCHandle inHandle1; private GCHandle outHandle; private ulong alignment; public DataTable(Int32[] inArray1, Int32[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int32>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int32>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; 
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int32, byte>(ref inArray1[0]), (uint)sizeOfinArray1); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<Int32> _fld1; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref testStruct._fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); return testStruct; } public void RunStructFldScenario(SimpleUnaryOpTest__Negate_Vector128_Int32 testClass) { var result = AdvSimd.Negate(_fld1); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(SimpleUnaryOpTest__Negate_Vector128_Int32 testClass) { fixed (Vector128<Int32>* pFld1 = &_fld1) { var result = AdvSimd.Negate( AdvSimd.LoadVector128((Int32*)(pFld1)) ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32); private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32); private static Int32[] _data1 = new Int32[Op1ElementCount]; private static Vector128<Int32> _clsVar1; private Vector128<Int32> _fld1; private DataTable _dataTable; static SimpleUnaryOpTest__Negate_Vector128_Int32() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _clsVar1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); } public SimpleUnaryOpTest__Negate_Vector128_Int32() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } _dataTable = new DataTable(_data1, new Int32[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.Negate( Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.Negate( AdvSimd.LoadVector128((Int32*)(_dataTable.inArray1Ptr)) ); 
Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.Negate), new Type[] { typeof(Vector128<Int32>) }) .Invoke(null, new object[] { Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int32>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.Negate), new Type[] { typeof(Vector128<Int32>) }) .Invoke(null, new object[] { AdvSimd.LoadVector128((Int32*)(_dataTable.inArray1Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int32>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.Negate( _clsVar1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector128<Int32>* pClsVar1 = &_clsVar1) { var result = AdvSimd.Negate( AdvSimd.LoadVector128((Int32*)(pClsVar1)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr); var result = AdvSimd.Negate(op1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector128((Int32*)(_dataTable.inArray1Ptr)); var result = AdvSimd.Negate(op1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleUnaryOpTest__Negate_Vector128_Int32(); var result = AdvSimd.Negate(test._fld1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new SimpleUnaryOpTest__Negate_Vector128_Int32(); fixed (Vector128<Int32>* pFld1 = &test._fld1) { var result = AdvSimd.Negate( AdvSimd.LoadVector128((Int32*)(pFld1)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.Negate(_fld1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector128<Int32>* pFld1 = &_fld1) { var result = AdvSimd.Negate( AdvSimd.LoadVector128((Int32*)(pFld1)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.Negate(test._fld1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.Negate( AdvSimd.LoadVector128((Int32*)(&test._fld1)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector128<Int32> op1, void* result, [CallerMemberName] string method = "") { Int32[] inArray1 = new Int32[Op1ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), op1); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int32>>()); ValidateResult(inArray1, outArray, method); } private void ValidateResult(void* op1, void* result, [CallerMemberName] string method = "") { Int32[] inArray1 = new Int32[Op1ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<Int32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int32>>()); ValidateResult(inArray1, outArray, method); } private void ValidateResult(Int32[] firstOp, Int32[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.Negate(firstOp[i]) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.Negate)}<Int32>(Vector128<Int32>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
dotnet/runtime
66,477
Add armv8l virtualized 32-bit ARM core detection
## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse the architecture as `arm` when the output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812). Arm support on source-build is tracked at https://github.com/dotnet/installer/pull/13378. Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782.
ayakael
2022-03-10T21:05:11Z
2022-03-11T09:29:12Z
f82fe1d83f1a0e22025e186fdc6f4d86de0a83a1
12816261feee53cc57fab17f088ee6544ef6ffdb
Add armv8l virtualized 32-bit ARM core detection. ## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse the architecture as `arm` when the output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812). Arm support on source-build is tracked at https://github.com/dotnet/installer/pull/13378. Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782.
./eng/native/configureplatform.cmake
include(${CMAKE_CURRENT_LIST_DIR}/functions.cmake) # If set, indicates that this is not an officially supported release # Keep in sync with IsPrerelease in Directory.Build.props set(PRERELEASE 1) #---------------------------------------- # Detect and set platform variable names # - for non-windows build platform & architecture is detected using inbuilt CMAKE variables and cross target component configure # - for windows we use the passed in parameter to CMAKE to determine build arch #---------------------------------------- set(CLR_CMAKE_HOST_OS ${CMAKE_SYSTEM_NAME}) if(CLR_CMAKE_HOST_OS STREQUAL Linux) set(CLR_CMAKE_HOST_UNIX 1) if(CLR_CROSS_COMPONENTS_BUILD) # CMAKE_HOST_SYSTEM_PROCESSOR returns the value of `uname -p` on host. if(CMAKE_HOST_SYSTEM_PROCESSOR STREQUAL x86_64 OR CMAKE_HOST_SYSTEM_PROCESSOR STREQUAL amd64) if(CLR_CMAKE_TARGET_ARCH STREQUAL "arm" OR CLR_CMAKE_TARGET_ARCH STREQUAL "armel") if(CMAKE_CROSSCOMPILING) set(CLR_CMAKE_HOST_UNIX_X86 1) else() set(CLR_CMAKE_HOST_UNIX_AMD64 1) endif() else() set(CLR_CMAKE_HOST_UNIX_AMD64 1) endif() elseif(CMAKE_HOST_SYSTEM_PROCESSOR STREQUAL i686) set(CLR_CMAKE_HOST_UNIX_X86 1) else() clr_unknown_arch() endif() else() # CMAKE_SYSTEM_PROCESSOR returns the value of `uname -p` on target. # For the AMD/Intel 64bit architecture two different strings are common. # Linux and Darwin identify it as "x86_64" while FreeBSD and netbsd uses the # "amd64" string. Accept either of the two here. if(CMAKE_SYSTEM_PROCESSOR STREQUAL x86_64 OR CMAKE_SYSTEM_PROCESSOR STREQUAL amd64) set(CLR_CMAKE_HOST_UNIX_AMD64 1) elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL armv7l) set(CLR_CMAKE_HOST_UNIX_ARM 1) set(CLR_CMAKE_HOST_UNIX_ARMV7L 1) elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL arm OR CMAKE_SYSTEM_PROCESSOR STREQUAL armv7-a) set(CLR_CMAKE_HOST_UNIX_ARM 1) elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL armv6 OR CMAKE_SYSTEM_PROCESSOR STREQUAL armv6l) set(CLR_CMAKE_HOST_UNIX_ARMV6 1) elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL aarch64 OR CMAKE_SYSTEM_PROCESSOR STREQUAL arm64) set(CLR_CMAKE_HOST_UNIX_ARM64 1) elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL loongarch64 OR CMAKE_SYSTEM_PROCESSOR STREQUAL loongarch64) set(CLR_CMAKE_HOST_UNIX_LOONGARCH64 1) elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL i686 OR CMAKE_SYSTEM_PROCESSOR STREQUAL x86) set(CLR_CMAKE_HOST_UNIX_X86 1) elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL s390x) set(CLR_CMAKE_HOST_UNIX_S390X 1) elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL mips64) set(CLR_CMAKE_HOST_UNIX_MIPS64 1) else() clr_unknown_arch() endif() endif() set(CLR_CMAKE_HOST_LINUX 1) # Detect Linux ID set(LINUX_ID_FILE "/etc/os-release") if(CMAKE_CROSSCOMPILING) set(LINUX_ID_FILE "${CMAKE_SYSROOT}${LINUX_ID_FILE}") endif() if(EXISTS ${LINUX_ID_FILE}) execute_process( COMMAND bash -c "source ${LINUX_ID_FILE} && echo \$ID" OUTPUT_VARIABLE CLR_CMAKE_LINUX_ID OUTPUT_STRIP_TRAILING_WHITESPACE) execute_process( COMMAND bash -c "if strings \"${CMAKE_SYSROOT}/usr/bin/ldd\" 2>&1 | grep -q musl; then echo musl; fi" OUTPUT_VARIABLE CLR_CMAKE_LINUX_MUSL OUTPUT_STRIP_TRAILING_WHITESPACE) endif() if(DEFINED CLR_CMAKE_LINUX_ID) if(CLR_CMAKE_LINUX_ID STREQUAL tizen) set(CLR_CMAKE_TARGET_TIZEN_LINUX 1) set(CLR_CMAKE_HOST_OS ${CLR_CMAKE_LINUX_ID}) elseif(CLR_CMAKE_LINUX_ID STREQUAL alpine) set(CLR_CMAKE_HOST_ALPINE_LINUX 1) set(CLR_CMAKE_HOST_OS ${CLR_CMAKE_LINUX_ID}) endif() if(CLR_CMAKE_LINUX_MUSL STREQUAL musl) set(CLR_CMAKE_HOST_LINUX_MUSL 1) endif() endif(DEFINED CLR_CMAKE_LINUX_ID) endif(CLR_CMAKE_HOST_OS STREQUAL Linux) if(CLR_CMAKE_HOST_OS STREQUAL Darwin) set(CLR_CMAKE_HOST_UNIX 1) 
if(CMAKE_SYSTEM_VARIANT STREQUAL MacCatalyst) set(CLR_CMAKE_HOST_MACCATALYST 1) else() set(CLR_CMAKE_HOST_OSX 1) endif(CMAKE_SYSTEM_VARIANT STREQUAL MacCatalyst) if(CMAKE_OSX_ARCHITECTURES STREQUAL x86_64) set(CLR_CMAKE_HOST_UNIX_AMD64 1) elseif(CMAKE_OSX_ARCHITECTURES STREQUAL arm64) set(CLR_CMAKE_HOST_UNIX_ARM64 1) else() clr_unknown_arch() endif() set(CMAKE_ASM_COMPILE_OBJECT "${CMAKE_C_COMPILER} <FLAGS> <DEFINES> <INCLUDES> -o <OBJECT> -c <SOURCE>") endif(CLR_CMAKE_HOST_OS STREQUAL Darwin) if(CLR_CMAKE_HOST_OS STREQUAL iOS OR CLR_CMAKE_HOST_OS STREQUAL iOSSimulator) set(CLR_CMAKE_HOST_UNIX 1) set(CLR_CMAKE_HOST_IOS 1) if(CMAKE_OSX_ARCHITECTURES MATCHES "x86_64") set(CLR_CMAKE_HOST_UNIX_AMD64 1) elseif(CMAKE_OSX_ARCHITECTURES MATCHES "i386") set(CLR_CMAKE_HOST_UNIX_X86 1) elseif(CMAKE_OSX_ARCHITECTURES MATCHES "armv7") set(CLR_CMAKE_HOST_UNIX_ARM 1) elseif(CMAKE_OSX_ARCHITECTURES MATCHES "arm64") set(CLR_CMAKE_HOST_UNIX_ARM64 1) else() clr_unknown_arch() endif() endif(CLR_CMAKE_HOST_OS STREQUAL iOS OR CLR_CMAKE_HOST_OS STREQUAL iOSSimulator) if(CLR_CMAKE_HOST_OS STREQUAL tvOS OR CLR_CMAKE_HOST_OS STREQUAL tvOSSimulator) set(CLR_CMAKE_HOST_UNIX 1) set(CLR_CMAKE_HOST_TVOS 1) if(CMAKE_OSX_ARCHITECTURES MATCHES "x86_64") set(CLR_CMAKE_HOST_UNIX_AMD64 1) elseif(CMAKE_OSX_ARCHITECTURES MATCHES "arm64") set(CLR_CMAKE_HOST_UNIX_ARM64 1) else() clr_unknown_arch() endif() endif(CLR_CMAKE_HOST_OS STREQUAL tvOS OR CLR_CMAKE_HOST_OS STREQUAL tvOSSimulator) if(CLR_CMAKE_HOST_OS STREQUAL Android) set(CLR_CMAKE_HOST_UNIX 1) set(CLR_CMAKE_HOST_LINUX 1) set(CLR_CMAKE_HOST_ANDROID 1) if(CMAKE_SYSTEM_PROCESSOR STREQUAL x86_64) set(CLR_CMAKE_HOST_UNIX_AMD64 1) elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL armv7-a) set(CLR_CMAKE_HOST_UNIX_ARM 1) elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL aarch64) set(CLR_CMAKE_HOST_UNIX_ARM64 1) elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL i686) set(CLR_CMAKE_HOST_UNIX_X86 1) else() clr_unknown_arch() endif() endif(CLR_CMAKE_HOST_OS STREQUAL Android) if(CLR_CMAKE_HOST_OS STREQUAL FreeBSD) set(CLR_CMAKE_HOST_UNIX 1) set(CLR_CMAKE_HOST_UNIX_AMD64 1) set(CLR_CMAKE_HOST_FREEBSD 1) endif(CLR_CMAKE_HOST_OS STREQUAL FreeBSD) if(CLR_CMAKE_HOST_OS STREQUAL OpenBSD) set(CLR_CMAKE_HOST_UNIX 1) set(CLR_CMAKE_HOST_UNIX_AMD64 1) set(CLR_CMAKE_HOST_OPENBSD 1) endif(CLR_CMAKE_HOST_OS STREQUAL OpenBSD) if(CLR_CMAKE_HOST_OS STREQUAL NetBSD) set(CLR_CMAKE_HOST_UNIX 1) set(CLR_CMAKE_HOST_UNIX_AMD64 1) set(CLR_CMAKE_HOST_NETBSD 1) endif(CLR_CMAKE_HOST_OS STREQUAL NetBSD) if(CLR_CMAKE_HOST_OS STREQUAL SunOS) set(CLR_CMAKE_HOST_UNIX 1) EXECUTE_PROCESS( COMMAND isainfo -n OUTPUT_VARIABLE SUNOS_NATIVE_INSTRUCTION_SET) if(SUNOS_NATIVE_INSTRUCTION_SET MATCHES "amd64" OR CMAKE_CROSSCOMPILING) set(CLR_CMAKE_HOST_UNIX_AMD64 1) set(CMAKE_SYSTEM_PROCESSOR "amd64") else() clr_unknown_arch() endif() EXECUTE_PROCESS( COMMAND uname -o OUTPUT_VARIABLE SUNOS_KERNEL_KIND ERROR_QUIET) set(CLR_CMAKE_HOST_SUNOS 1) if(SUNOS_KERNEL_KIND STREQUAL illumos OR CMAKE_CROSSCOMPILING) set(CLR_CMAKE_HOST_OS_ILLUMOS 1) else(SUNOS_KERNEL_KIND STREQUAL illumos OR CMAKE_CROSSCOMPILING) set(CLR_CMAKE_HOST_OS_SOLARIS 1) endif(SUNOS_KERNEL_KIND STREQUAL illumos OR CMAKE_CROSSCOMPILING) endif(CLR_CMAKE_HOST_OS STREQUAL SunOS) if(CLR_CMAKE_HOST_OS STREQUAL Windows) set(CLR_CMAKE_HOST_OS windows) set(CLR_CMAKE_HOST_WIN32 1) endif(CLR_CMAKE_HOST_OS STREQUAL Windows) if(CLR_CMAKE_HOST_OS STREQUAL Emscripten) set(CLR_CMAKE_HOST_UNIX 1) set(CLR_CMAKE_HOST_BROWSER 1) endif(CLR_CMAKE_HOST_OS STREQUAL Emscripten) #-------------------------------------------- 
# This repo builds two set of binaries # 1. binaries which execute on target arch machine # - for such binaries host architecture & target architecture are same # - eg. coreclr.dll # 2. binaries which execute on host machine but target another architecture # - host architecture is different from target architecture # - eg. crossgen.exe - runs on x64 machine and generates nis targeting arm64 # - for complete list of such binaries refer to file crosscomponents.cmake #------------------------------------------------------------- # Set HOST architecture variables if(CLR_CMAKE_HOST_UNIX_ARM) set(CLR_CMAKE_HOST_ARCH_ARM 1) set(CLR_CMAKE_HOST_ARCH "arm") if(CLR_CMAKE_HOST_UNIX_ARMV7L) set(CLR_CMAKE_HOST_ARCH_ARMV7L 1) endif() elseif(CLR_CMAKE_HOST_UNIX_ARMV6) set(CLR_CMAKE_HOST_ARCH_ARMV6 1) set(CLR_CMAKE_HOST_ARCH "armv6") if(CLR_CMAKE_HOST_UNIX_ARMV6L) set(CLR_CMAKE_HOST_ARCH_ARMV6L 1) endif() elseif(CLR_CMAKE_HOST_UNIX_ARM64) set(CLR_CMAKE_HOST_ARCH_ARM64 1) set(CLR_CMAKE_HOST_ARCH "arm64") elseif(CLR_CMAKE_HOST_UNIX_LOONGARCH64) set(CLR_CMAKE_HOST_ARCH_LOONGARCH64 1) set(CLR_CMAKE_HOST_ARCH "loongarch64") elseif(CLR_CMAKE_HOST_UNIX_AMD64) set(CLR_CMAKE_HOST_ARCH_AMD64 1) set(CLR_CMAKE_HOST_ARCH "x64") elseif(CLR_CMAKE_HOST_UNIX_X86) set(CLR_CMAKE_HOST_ARCH_I386 1) set(CLR_CMAKE_HOST_ARCH "x86") elseif(CLR_CMAKE_HOST_UNIX_S390X) set(CLR_CMAKE_HOST_ARCH_S390X 1) set(CLR_CMAKE_HOST_ARCH "s390x") elseif(CLR_CMAKE_HOST_BROWSER) set(CLR_CMAKE_HOST_ARCH_WASM 1) set(CLR_CMAKE_HOST_ARCH "wasm") elseif(CLR_CMAKE_HOST_UNIX_MIPS64) set(CLR_CMAKE_HOST_ARCH_MIPS64 1) set(CLR_CMAKE_HOST_ARCH "mips64") elseif(WIN32) # CLR_CMAKE_HOST_ARCH is passed in as param to cmake if (CLR_CMAKE_HOST_ARCH STREQUAL x64) set(CLR_CMAKE_HOST_ARCH_AMD64 1) elseif(CLR_CMAKE_HOST_ARCH STREQUAL x86) set(CLR_CMAKE_HOST_ARCH_I386 1) elseif(CLR_CMAKE_HOST_ARCH STREQUAL arm) set(CLR_CMAKE_HOST_ARCH_ARM 1) elseif(CLR_CMAKE_HOST_ARCH STREQUAL arm64) set(CLR_CMAKE_HOST_ARCH_ARM64 1) else() clr_unknown_arch() endif() endif() # Set TARGET architecture variables # Target arch will be a cmake param (optional) for both windows as well as non-windows build # if target arch is not specified then host & target are same if(NOT DEFINED CLR_CMAKE_TARGET_ARCH OR CLR_CMAKE_TARGET_ARCH STREQUAL "" ) set(CLR_CMAKE_TARGET_ARCH ${CLR_CMAKE_HOST_ARCH}) # This is required for "arm" targets (CMAKE_SYSTEM_PROCESSOR "armv7l"), # for which this flag otherwise won't be set up below if (CLR_CMAKE_HOST_ARCH_ARMV7L) set(CLR_CMAKE_TARGET_ARCH_ARMV7L 1) endif() endif() # Set target architecture variables if (CLR_CMAKE_TARGET_ARCH STREQUAL x64) set(CLR_CMAKE_TARGET_ARCH_AMD64 1) elseif(CLR_CMAKE_TARGET_ARCH STREQUAL x86) set(CLR_CMAKE_TARGET_ARCH_I386 1) elseif(CLR_CMAKE_TARGET_ARCH STREQUAL arm64) set(CLR_CMAKE_TARGET_ARCH_ARM64 1) elseif(CLR_CMAKE_TARGET_ARCH STREQUAL loongarch64) set(CLR_CMAKE_TARGET_ARCH_LOONGARCH64 1) elseif(CLR_CMAKE_TARGET_ARCH STREQUAL arm) set(CLR_CMAKE_TARGET_ARCH_ARM 1) elseif(CLR_CMAKE_TARGET_ARCH STREQUAL armv6) set(CLR_CMAKE_TARGET_ARCH_ARMV6 1) elseif(CLR_CMAKE_TARGET_ARCH STREQUAL armel) set(CLR_CMAKE_TARGET_ARCH_ARM 1) set(CLR_CMAKE_TARGET_ARCH_ARMV7L 1) set(ARM_SOFTFP 1) elseif(CLR_CMAKE_TARGET_ARCH STREQUAL s390x) set(CLR_CMAKE_TARGET_ARCH_S390X 1) elseif(CLR_CMAKE_TARGET_ARCH STREQUAL wasm) set(CLR_CMAKE_TARGET_ARCH_WASM 1) elseif(CLR_CMAKE_TARGET_ARCH STREQUAL mips64) set(CLR_CMAKE_TARGET_ARCH_MIPS64 1) else() clr_unknown_arch() endif() # Set TARGET architecture variables # Target os will be a cmake param (optional) for both 
windows as well as non-windows build # if target os is not specified then host & target os are same if (NOT DEFINED CLR_CMAKE_TARGET_OS OR CLR_CMAKE_TARGET_OS STREQUAL "" ) set(CLR_CMAKE_TARGET_OS ${CLR_CMAKE_HOST_OS}) endif() if(CLR_CMAKE_TARGET_OS STREQUAL Linux) set(CLR_CMAKE_TARGET_UNIX 1) set(CLR_CMAKE_TARGET_LINUX 1) endif(CLR_CMAKE_TARGET_OS STREQUAL Linux) if(CLR_CMAKE_HOST_LINUX_MUSL) set(CLR_CMAKE_TARGET_LINUX_MUSL 1) endif(CLR_CMAKE_HOST_LINUX_MUSL) if(CLR_CMAKE_TARGET_OS STREQUAL tizen) set(CLR_CMAKE_TARGET_UNIX 1) set(CLR_CMAKE_TARGET_LINUX 1) set(CLR_CMAKE_TARGET_TIZEN_LINUX 1) endif(CLR_CMAKE_TARGET_OS STREQUAL tizen) if(CLR_CMAKE_TARGET_OS STREQUAL alpine) set(CLR_CMAKE_TARGET_UNIX 1) set(CLR_CMAKE_TARGET_LINUX 1) set(CLR_CMAKE_TARGET_ALPINE_LINUX 1) endif(CLR_CMAKE_TARGET_OS STREQUAL alpine) if(CLR_CMAKE_TARGET_OS STREQUAL Android) set(CLR_CMAKE_TARGET_UNIX 1) set(CLR_CMAKE_TARGET_LINUX 1) set(CLR_CMAKE_TARGET_ANDROID 1) endif(CLR_CMAKE_TARGET_OS STREQUAL Android) if(CLR_CMAKE_TARGET_OS STREQUAL Darwin) set(CLR_CMAKE_TARGET_UNIX 1) if(CMAKE_SYSTEM_VARIANT STREQUAL MacCatalyst) set(CLR_CMAKE_TARGET_MACCATALYST 1) else() set(CLR_CMAKE_TARGET_OSX 1) endif(CMAKE_SYSTEM_VARIANT STREQUAL MacCatalyst) endif(CLR_CMAKE_TARGET_OS STREQUAL Darwin) if(CLR_CMAKE_TARGET_OS STREQUAL iOS OR CLR_CMAKE_TARGET_OS STREQUAL iOSSimulator) set(CLR_CMAKE_TARGET_UNIX 1) set(CLR_CMAKE_TARGET_IOS 1) endif(CLR_CMAKE_TARGET_OS STREQUAL iOS OR CLR_CMAKE_TARGET_OS STREQUAL iOSSimulator) if(CLR_CMAKE_TARGET_OS STREQUAL tvOS OR CLR_CMAKE_TARGET_OS STREQUAL tvOSSimulator) set(CLR_CMAKE_TARGET_UNIX 1) set(CLR_CMAKE_TARGET_TVOS 1) endif(CLR_CMAKE_TARGET_OS STREQUAL tvOS OR CLR_CMAKE_TARGET_OS STREQUAL tvOSSimulator) if(CLR_CMAKE_TARGET_OS STREQUAL FreeBSD) set(CLR_CMAKE_TARGET_UNIX 1) set(CLR_CMAKE_TARGET_FREEBSD 1) endif(CLR_CMAKE_TARGET_OS STREQUAL FreeBSD) if(CLR_CMAKE_TARGET_OS STREQUAL OpenBSD) set(CLR_CMAKE_TARGET_UNIX 1) set(CLR_CMAKE_TARGET_OPENBSD 1) endif(CLR_CMAKE_TARGET_OS STREQUAL OpenBSD) if(CLR_CMAKE_TARGET_OS STREQUAL NetBSD) set(CLR_CMAKE_TARGET_UNIX 1) set(CLR_CMAKE_TARGET_NETBSD 1) endif(CLR_CMAKE_TARGET_OS STREQUAL NetBSD) if(CLR_CMAKE_TARGET_OS STREQUAL SunOS) set(CLR_CMAKE_TARGET_UNIX 1) if(CLR_CMAKE_HOST_OS_ILLUMOS) set(CLR_CMAKE_TARGET_OS_ILLUMOS 1) else(CLR_CMAKE_HOST_OS_ILLUMOS) set(CLR_CMAKE_TARGET_OS_SOLARIS 1) endif(CLR_CMAKE_HOST_OS_ILLUMOS) set(CLR_CMAKE_TARGET_SUNOS 1) endif(CLR_CMAKE_TARGET_OS STREQUAL SunOS) if(CLR_CMAKE_TARGET_OS STREQUAL Emscripten) set(CLR_CMAKE_TARGET_UNIX 1) set(CLR_CMAKE_TARGET_LINUX 1) set(CLR_CMAKE_TARGET_BROWSER 1) endif(CLR_CMAKE_TARGET_OS STREQUAL Emscripten) if(CLR_CMAKE_TARGET_UNIX) if(CLR_CMAKE_TARGET_ARCH STREQUAL x64) set(CLR_CMAKE_TARGET_UNIX_AMD64 1) elseif(CLR_CMAKE_TARGET_ARCH STREQUAL armel) set(CLR_CMAKE_TARGET_UNIX_ARM 1) elseif(CLR_CMAKE_TARGET_ARCH STREQUAL arm) set(CLR_CMAKE_TARGET_UNIX_ARM 1) elseif(CLR_CMAKE_TARGET_ARCH STREQUAL armv6) set(CLR_CMAKE_TARGET_UNIX_ARMV6 1) elseif(CLR_CMAKE_TARGET_ARCH STREQUAL arm64) set(CLR_CMAKE_TARGET_UNIX_ARM64 1) elseif(CLR_CMAKE_TARGET_ARCH STREQUAL loongarch64) set(CLR_CMAKE_TARGET_UNIX_LOONGARCH64 1) elseif(CLR_CMAKE_TARGET_ARCH STREQUAL x86) set(CLR_CMAKE_TARGET_UNIX_X86 1) elseif(CLR_CMAKE_TARGET_ARCH STREQUAL s390x) set(CLR_CMAKE_TARGET_UNIX_S390X 1) elseif(CLR_CMAKE_TARGET_ARCH STREQUAL wasm) set(CLR_CMAKE_TARGET_UNIX_WASM 1) elseif(CLR_CMAKE_TARGET_ARCH STREQUAL mips64) set(CLR_CMAKE_TARGET_UNIX_MIPS64 1) else() clr_unknown_arch() endif() else() set(CLR_CMAKE_TARGET_WIN32 1) 
endif(CLR_CMAKE_TARGET_UNIX) # check if host & target os/arch combination are valid if (CLR_CMAKE_TARGET_OS STREQUAL CLR_CMAKE_HOST_OS) if(NOT(CLR_CMAKE_TARGET_ARCH STREQUAL CLR_CMAKE_HOST_ARCH)) if(NOT((CLR_CMAKE_HOST_ARCH_AMD64 AND CLR_CMAKE_TARGET_ARCH_ARM64) OR (CLR_CMAKE_HOST_ARCH_I386 AND CLR_CMAKE_TARGET_ARCH_ARM) OR (CLR_CMAKE_HOST_ARCH_AMD64 AND CLR_CMAKE_TARGET_ARCH_ARM) OR (CLR_CMAKE_HOST_ARCH_AMD64 AND CLR_CMAKE_TARGET_ARCH_I386))) message(FATAL_ERROR "Invalid platform and target arch combination TARGET_ARCH=${CLR_CMAKE_TARGET_ARCH} HOST_ARCH=${CLR_CMAKE_HOST_ARCH}") endif() endif() else() if(NOT (CLR_CMAKE_HOST_OS STREQUAL windows)) message(FATAL_ERROR "Invalid host and target os/arch combination. Host OS: ${CLR_CMAKE_HOST_OS}") endif() if(NOT (CLR_CMAKE_TARGET_LINUX OR CLR_CMAKE_TARGET_ALPINE_LINUX)) message(FATAL_ERROR "Invalid host and target os/arch combination. Target OS: ${CLR_CMAKE_TARGET_OS}") endif() if(NOT ((CLR_CMAKE_HOST_ARCH_AMD64 AND (CLR_CMAKE_TARGET_ARCH_AMD64 OR CLR_CMAKE_TARGET_ARCH_ARM64)) OR (CLR_CMAKE_HOST_ARCH_I386 AND CLR_CMAKE_TARGET_ARCH_ARM))) message(FATAL_ERROR "Invalid host and target os/arch combination. Host Arch: ${CLR_CMAKE_HOST_ARCH} Target Arch: ${CLR_CMAKE_TARGET_ARCH}") endif() endif() if(NOT CLR_CMAKE_TARGET_BROWSER) # The default linker on Solaris also does not support PIE. if(NOT CLR_CMAKE_TARGET_ANDROID AND NOT CLR_CMAKE_TARGET_SUNOS AND NOT CLR_CMAKE_TARGET_OSX AND NOT CLR_CMAKE_TARGET_MACCATALYST AND NOT CLR_CMAKE_HOST_TVOS AND NOT CLR_CMAKE_HOST_IOS AND NOT MSVC) set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -pie") add_compile_options($<$<STREQUAL:$<TARGET_PROPERTY:TYPE>,EXECUTABLE>:-fPIE>) add_compile_options($<$<STREQUAL:$<TARGET_PROPERTY:TYPE>,SHARED_LIBRARY>:-fPIC>) endif() set(CMAKE_POSITION_INDEPENDENT_CODE ON) endif() string(TOLOWER "${CMAKE_BUILD_TYPE}" LOWERCASE_CMAKE_BUILD_TYPE) if(LOWERCASE_CMAKE_BUILD_TYPE STREQUAL debug) # Clear _FORTIFY_SOURCE=2, if set string(REPLACE "-D_FORTIFY_SOURCE=2 " "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") string(REPLACE "-D_FORTIFY_SOURCE=2 " "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}") endif()
include(${CMAKE_CURRENT_LIST_DIR}/functions.cmake) # If set, indicates that this is not an officially supported release # Keep in sync with IsPrerelease in Directory.Build.props set(PRERELEASE 1) #---------------------------------------- # Detect and set platform variable names # - for non-windows build platform & architecture is detected using inbuilt CMAKE variables and cross target component configure # - for windows we use the passed in parameter to CMAKE to determine build arch #---------------------------------------- set(CLR_CMAKE_HOST_OS ${CMAKE_SYSTEM_NAME}) if(CLR_CMAKE_HOST_OS STREQUAL Linux) set(CLR_CMAKE_HOST_UNIX 1) if(CLR_CROSS_COMPONENTS_BUILD) # CMAKE_HOST_SYSTEM_PROCESSOR returns the value of `uname -p` on host. if(CMAKE_HOST_SYSTEM_PROCESSOR STREQUAL x86_64 OR CMAKE_HOST_SYSTEM_PROCESSOR STREQUAL amd64) if(CLR_CMAKE_TARGET_ARCH STREQUAL "arm" OR CLR_CMAKE_TARGET_ARCH STREQUAL "armel") if(CMAKE_CROSSCOMPILING) set(CLR_CMAKE_HOST_UNIX_X86 1) else() set(CLR_CMAKE_HOST_UNIX_AMD64 1) endif() else() set(CLR_CMAKE_HOST_UNIX_AMD64 1) endif() elseif(CMAKE_HOST_SYSTEM_PROCESSOR STREQUAL i686) set(CLR_CMAKE_HOST_UNIX_X86 1) else() clr_unknown_arch() endif() else() # CMAKE_SYSTEM_PROCESSOR returns the value of `uname -p` on target. # For the AMD/Intel 64bit architecture two different strings are common. # Linux and Darwin identify it as "x86_64" while FreeBSD and netbsd uses the # "amd64" string. Accept either of the two here. if(CMAKE_SYSTEM_PROCESSOR STREQUAL x86_64 OR CMAKE_SYSTEM_PROCESSOR STREQUAL amd64) set(CLR_CMAKE_HOST_UNIX_AMD64 1) elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL armv7l OR CMAKE_SYSTEM_PROCESSOR STREQUAL armv8l) set(CLR_CMAKE_HOST_UNIX_ARM 1) set(CLR_CMAKE_HOST_UNIX_ARMV7L 1) elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL arm OR CMAKE_SYSTEM_PROCESSOR STREQUAL armv7-a) set(CLR_CMAKE_HOST_UNIX_ARM 1) elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL armv6 OR CMAKE_SYSTEM_PROCESSOR STREQUAL armv6l) set(CLR_CMAKE_HOST_UNIX_ARMV6 1) elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL aarch64 OR CMAKE_SYSTEM_PROCESSOR STREQUAL arm64) set(CLR_CMAKE_HOST_UNIX_ARM64 1) elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL loongarch64 OR CMAKE_SYSTEM_PROCESSOR STREQUAL loongarch64) set(CLR_CMAKE_HOST_UNIX_LOONGARCH64 1) elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL i686 OR CMAKE_SYSTEM_PROCESSOR STREQUAL x86) set(CLR_CMAKE_HOST_UNIX_X86 1) elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL s390x) set(CLR_CMAKE_HOST_UNIX_S390X 1) elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL mips64) set(CLR_CMAKE_HOST_UNIX_MIPS64 1) else() clr_unknown_arch() endif() endif() set(CLR_CMAKE_HOST_LINUX 1) # Detect Linux ID set(LINUX_ID_FILE "/etc/os-release") if(CMAKE_CROSSCOMPILING) set(LINUX_ID_FILE "${CMAKE_SYSROOT}${LINUX_ID_FILE}") endif() if(EXISTS ${LINUX_ID_FILE}) execute_process( COMMAND bash -c "source ${LINUX_ID_FILE} && echo \$ID" OUTPUT_VARIABLE CLR_CMAKE_LINUX_ID OUTPUT_STRIP_TRAILING_WHITESPACE) execute_process( COMMAND bash -c "if strings \"${CMAKE_SYSROOT}/usr/bin/ldd\" 2>&1 | grep -q musl; then echo musl; fi" OUTPUT_VARIABLE CLR_CMAKE_LINUX_MUSL OUTPUT_STRIP_TRAILING_WHITESPACE) endif() if(DEFINED CLR_CMAKE_LINUX_ID) if(CLR_CMAKE_LINUX_ID STREQUAL tizen) set(CLR_CMAKE_TARGET_TIZEN_LINUX 1) set(CLR_CMAKE_HOST_OS ${CLR_CMAKE_LINUX_ID}) elseif(CLR_CMAKE_LINUX_ID STREQUAL alpine) set(CLR_CMAKE_HOST_ALPINE_LINUX 1) set(CLR_CMAKE_HOST_OS ${CLR_CMAKE_LINUX_ID}) endif() if(CLR_CMAKE_LINUX_MUSL STREQUAL musl) set(CLR_CMAKE_HOST_LINUX_MUSL 1) endif() endif(DEFINED CLR_CMAKE_LINUX_ID) endif(CLR_CMAKE_HOST_OS STREQUAL Linux) if(CLR_CMAKE_HOST_OS STREQUAL Darwin) 
set(CLR_CMAKE_HOST_UNIX 1) if(CMAKE_SYSTEM_VARIANT STREQUAL MacCatalyst) set(CLR_CMAKE_HOST_MACCATALYST 1) else() set(CLR_CMAKE_HOST_OSX 1) endif(CMAKE_SYSTEM_VARIANT STREQUAL MacCatalyst) if(CMAKE_OSX_ARCHITECTURES STREQUAL x86_64) set(CLR_CMAKE_HOST_UNIX_AMD64 1) elseif(CMAKE_OSX_ARCHITECTURES STREQUAL arm64) set(CLR_CMAKE_HOST_UNIX_ARM64 1) else() clr_unknown_arch() endif() set(CMAKE_ASM_COMPILE_OBJECT "${CMAKE_C_COMPILER} <FLAGS> <DEFINES> <INCLUDES> -o <OBJECT> -c <SOURCE>") endif(CLR_CMAKE_HOST_OS STREQUAL Darwin) if(CLR_CMAKE_HOST_OS STREQUAL iOS OR CLR_CMAKE_HOST_OS STREQUAL iOSSimulator) set(CLR_CMAKE_HOST_UNIX 1) set(CLR_CMAKE_HOST_IOS 1) if(CMAKE_OSX_ARCHITECTURES MATCHES "x86_64") set(CLR_CMAKE_HOST_UNIX_AMD64 1) elseif(CMAKE_OSX_ARCHITECTURES MATCHES "i386") set(CLR_CMAKE_HOST_UNIX_X86 1) elseif(CMAKE_OSX_ARCHITECTURES MATCHES "armv7") set(CLR_CMAKE_HOST_UNIX_ARM 1) elseif(CMAKE_OSX_ARCHITECTURES MATCHES "arm64") set(CLR_CMAKE_HOST_UNIX_ARM64 1) else() clr_unknown_arch() endif() endif(CLR_CMAKE_HOST_OS STREQUAL iOS OR CLR_CMAKE_HOST_OS STREQUAL iOSSimulator) if(CLR_CMAKE_HOST_OS STREQUAL tvOS OR CLR_CMAKE_HOST_OS STREQUAL tvOSSimulator) set(CLR_CMAKE_HOST_UNIX 1) set(CLR_CMAKE_HOST_TVOS 1) if(CMAKE_OSX_ARCHITECTURES MATCHES "x86_64") set(CLR_CMAKE_HOST_UNIX_AMD64 1) elseif(CMAKE_OSX_ARCHITECTURES MATCHES "arm64") set(CLR_CMAKE_HOST_UNIX_ARM64 1) else() clr_unknown_arch() endif() endif(CLR_CMAKE_HOST_OS STREQUAL tvOS OR CLR_CMAKE_HOST_OS STREQUAL tvOSSimulator) if(CLR_CMAKE_HOST_OS STREQUAL Android) set(CLR_CMAKE_HOST_UNIX 1) set(CLR_CMAKE_HOST_LINUX 1) set(CLR_CMAKE_HOST_ANDROID 1) if(CMAKE_SYSTEM_PROCESSOR STREQUAL x86_64) set(CLR_CMAKE_HOST_UNIX_AMD64 1) elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL armv7-a) set(CLR_CMAKE_HOST_UNIX_ARM 1) elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL aarch64) set(CLR_CMAKE_HOST_UNIX_ARM64 1) elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL i686) set(CLR_CMAKE_HOST_UNIX_X86 1) else() clr_unknown_arch() endif() endif(CLR_CMAKE_HOST_OS STREQUAL Android) if(CLR_CMAKE_HOST_OS STREQUAL FreeBSD) set(CLR_CMAKE_HOST_UNIX 1) set(CLR_CMAKE_HOST_UNIX_AMD64 1) set(CLR_CMAKE_HOST_FREEBSD 1) endif(CLR_CMAKE_HOST_OS STREQUAL FreeBSD) if(CLR_CMAKE_HOST_OS STREQUAL OpenBSD) set(CLR_CMAKE_HOST_UNIX 1) set(CLR_CMAKE_HOST_UNIX_AMD64 1) set(CLR_CMAKE_HOST_OPENBSD 1) endif(CLR_CMAKE_HOST_OS STREQUAL OpenBSD) if(CLR_CMAKE_HOST_OS STREQUAL NetBSD) set(CLR_CMAKE_HOST_UNIX 1) set(CLR_CMAKE_HOST_UNIX_AMD64 1) set(CLR_CMAKE_HOST_NETBSD 1) endif(CLR_CMAKE_HOST_OS STREQUAL NetBSD) if(CLR_CMAKE_HOST_OS STREQUAL SunOS) set(CLR_CMAKE_HOST_UNIX 1) EXECUTE_PROCESS( COMMAND isainfo -n OUTPUT_VARIABLE SUNOS_NATIVE_INSTRUCTION_SET) if(SUNOS_NATIVE_INSTRUCTION_SET MATCHES "amd64" OR CMAKE_CROSSCOMPILING) set(CLR_CMAKE_HOST_UNIX_AMD64 1) set(CMAKE_SYSTEM_PROCESSOR "amd64") else() clr_unknown_arch() endif() EXECUTE_PROCESS( COMMAND uname -o OUTPUT_VARIABLE SUNOS_KERNEL_KIND ERROR_QUIET) set(CLR_CMAKE_HOST_SUNOS 1) if(SUNOS_KERNEL_KIND STREQUAL illumos OR CMAKE_CROSSCOMPILING) set(CLR_CMAKE_HOST_OS_ILLUMOS 1) else(SUNOS_KERNEL_KIND STREQUAL illumos OR CMAKE_CROSSCOMPILING) set(CLR_CMAKE_HOST_OS_SOLARIS 1) endif(SUNOS_KERNEL_KIND STREQUAL illumos OR CMAKE_CROSSCOMPILING) endif(CLR_CMAKE_HOST_OS STREQUAL SunOS) if(CLR_CMAKE_HOST_OS STREQUAL Windows) set(CLR_CMAKE_HOST_OS windows) set(CLR_CMAKE_HOST_WIN32 1) endif(CLR_CMAKE_HOST_OS STREQUAL Windows) if(CLR_CMAKE_HOST_OS STREQUAL Emscripten) set(CLR_CMAKE_HOST_UNIX 1) set(CLR_CMAKE_HOST_BROWSER 1) endif(CLR_CMAKE_HOST_OS STREQUAL Emscripten) 
#-------------------------------------------- # This repo builds two set of binaries # 1. binaries which execute on target arch machine # - for such binaries host architecture & target architecture are same # - eg. coreclr.dll # 2. binaries which execute on host machine but target another architecture # - host architecture is different from target architecture # - eg. crossgen.exe - runs on x64 machine and generates nis targeting arm64 # - for complete list of such binaries refer to file crosscomponents.cmake #------------------------------------------------------------- # Set HOST architecture variables if(CLR_CMAKE_HOST_UNIX_ARM) set(CLR_CMAKE_HOST_ARCH_ARM 1) set(CLR_CMAKE_HOST_ARCH "arm") if(CLR_CMAKE_HOST_UNIX_ARMV7L) set(CLR_CMAKE_HOST_ARCH_ARMV7L 1) endif() elseif(CLR_CMAKE_HOST_UNIX_ARMV6) set(CLR_CMAKE_HOST_ARCH_ARMV6 1) set(CLR_CMAKE_HOST_ARCH "armv6") if(CLR_CMAKE_HOST_UNIX_ARMV6L) set(CLR_CMAKE_HOST_ARCH_ARMV6L 1) endif() elseif(CLR_CMAKE_HOST_UNIX_ARM64) set(CLR_CMAKE_HOST_ARCH_ARM64 1) set(CLR_CMAKE_HOST_ARCH "arm64") elseif(CLR_CMAKE_HOST_UNIX_LOONGARCH64) set(CLR_CMAKE_HOST_ARCH_LOONGARCH64 1) set(CLR_CMAKE_HOST_ARCH "loongarch64") elseif(CLR_CMAKE_HOST_UNIX_AMD64) set(CLR_CMAKE_HOST_ARCH_AMD64 1) set(CLR_CMAKE_HOST_ARCH "x64") elseif(CLR_CMAKE_HOST_UNIX_X86) set(CLR_CMAKE_HOST_ARCH_I386 1) set(CLR_CMAKE_HOST_ARCH "x86") elseif(CLR_CMAKE_HOST_UNIX_S390X) set(CLR_CMAKE_HOST_ARCH_S390X 1) set(CLR_CMAKE_HOST_ARCH "s390x") elseif(CLR_CMAKE_HOST_BROWSER) set(CLR_CMAKE_HOST_ARCH_WASM 1) set(CLR_CMAKE_HOST_ARCH "wasm") elseif(CLR_CMAKE_HOST_UNIX_MIPS64) set(CLR_CMAKE_HOST_ARCH_MIPS64 1) set(CLR_CMAKE_HOST_ARCH "mips64") elseif(WIN32) # CLR_CMAKE_HOST_ARCH is passed in as param to cmake if (CLR_CMAKE_HOST_ARCH STREQUAL x64) set(CLR_CMAKE_HOST_ARCH_AMD64 1) elseif(CLR_CMAKE_HOST_ARCH STREQUAL x86) set(CLR_CMAKE_HOST_ARCH_I386 1) elseif(CLR_CMAKE_HOST_ARCH STREQUAL arm) set(CLR_CMAKE_HOST_ARCH_ARM 1) elseif(CLR_CMAKE_HOST_ARCH STREQUAL arm64) set(CLR_CMAKE_HOST_ARCH_ARM64 1) else() clr_unknown_arch() endif() endif() # Set TARGET architecture variables # Target arch will be a cmake param (optional) for both windows as well as non-windows build # if target arch is not specified then host & target are same if(NOT DEFINED CLR_CMAKE_TARGET_ARCH OR CLR_CMAKE_TARGET_ARCH STREQUAL "" ) set(CLR_CMAKE_TARGET_ARCH ${CLR_CMAKE_HOST_ARCH}) # This is required for "arm" targets (CMAKE_SYSTEM_PROCESSOR "armv7l"), # for which this flag otherwise won't be set up below if (CLR_CMAKE_HOST_ARCH_ARMV7L) set(CLR_CMAKE_TARGET_ARCH_ARMV7L 1) endif() endif() # Set target architecture variables if (CLR_CMAKE_TARGET_ARCH STREQUAL x64) set(CLR_CMAKE_TARGET_ARCH_AMD64 1) elseif(CLR_CMAKE_TARGET_ARCH STREQUAL x86) set(CLR_CMAKE_TARGET_ARCH_I386 1) elseif(CLR_CMAKE_TARGET_ARCH STREQUAL arm64) set(CLR_CMAKE_TARGET_ARCH_ARM64 1) elseif(CLR_CMAKE_TARGET_ARCH STREQUAL loongarch64) set(CLR_CMAKE_TARGET_ARCH_LOONGARCH64 1) elseif(CLR_CMAKE_TARGET_ARCH STREQUAL arm) set(CLR_CMAKE_TARGET_ARCH_ARM 1) elseif(CLR_CMAKE_TARGET_ARCH STREQUAL armv6) set(CLR_CMAKE_TARGET_ARCH_ARMV6 1) elseif(CLR_CMAKE_TARGET_ARCH STREQUAL armel) set(CLR_CMAKE_TARGET_ARCH_ARM 1) set(CLR_CMAKE_TARGET_ARCH_ARMV7L 1) set(ARM_SOFTFP 1) elseif(CLR_CMAKE_TARGET_ARCH STREQUAL s390x) set(CLR_CMAKE_TARGET_ARCH_S390X 1) elseif(CLR_CMAKE_TARGET_ARCH STREQUAL wasm) set(CLR_CMAKE_TARGET_ARCH_WASM 1) elseif(CLR_CMAKE_TARGET_ARCH STREQUAL mips64) set(CLR_CMAKE_TARGET_ARCH_MIPS64 1) else() clr_unknown_arch() endif() # Set TARGET architecture variables # Target os 
will be a cmake param (optional) for both windows as well as non-windows build # if target os is not specified then host & target os are same if (NOT DEFINED CLR_CMAKE_TARGET_OS OR CLR_CMAKE_TARGET_OS STREQUAL "" ) set(CLR_CMAKE_TARGET_OS ${CLR_CMAKE_HOST_OS}) endif() if(CLR_CMAKE_TARGET_OS STREQUAL Linux) set(CLR_CMAKE_TARGET_UNIX 1) set(CLR_CMAKE_TARGET_LINUX 1) endif(CLR_CMAKE_TARGET_OS STREQUAL Linux) if(CLR_CMAKE_HOST_LINUX_MUSL) set(CLR_CMAKE_TARGET_LINUX_MUSL 1) endif(CLR_CMAKE_HOST_LINUX_MUSL) if(CLR_CMAKE_TARGET_OS STREQUAL tizen) set(CLR_CMAKE_TARGET_UNIX 1) set(CLR_CMAKE_TARGET_LINUX 1) set(CLR_CMAKE_TARGET_TIZEN_LINUX 1) endif(CLR_CMAKE_TARGET_OS STREQUAL tizen) if(CLR_CMAKE_TARGET_OS STREQUAL alpine) set(CLR_CMAKE_TARGET_UNIX 1) set(CLR_CMAKE_TARGET_LINUX 1) set(CLR_CMAKE_TARGET_ALPINE_LINUX 1) endif(CLR_CMAKE_TARGET_OS STREQUAL alpine) if(CLR_CMAKE_TARGET_OS STREQUAL Android) set(CLR_CMAKE_TARGET_UNIX 1) set(CLR_CMAKE_TARGET_LINUX 1) set(CLR_CMAKE_TARGET_ANDROID 1) endif(CLR_CMAKE_TARGET_OS STREQUAL Android) if(CLR_CMAKE_TARGET_OS STREQUAL Darwin) set(CLR_CMAKE_TARGET_UNIX 1) if(CMAKE_SYSTEM_VARIANT STREQUAL MacCatalyst) set(CLR_CMAKE_TARGET_MACCATALYST 1) else() set(CLR_CMAKE_TARGET_OSX 1) endif(CMAKE_SYSTEM_VARIANT STREQUAL MacCatalyst) endif(CLR_CMAKE_TARGET_OS STREQUAL Darwin) if(CLR_CMAKE_TARGET_OS STREQUAL iOS OR CLR_CMAKE_TARGET_OS STREQUAL iOSSimulator) set(CLR_CMAKE_TARGET_UNIX 1) set(CLR_CMAKE_TARGET_IOS 1) endif(CLR_CMAKE_TARGET_OS STREQUAL iOS OR CLR_CMAKE_TARGET_OS STREQUAL iOSSimulator) if(CLR_CMAKE_TARGET_OS STREQUAL tvOS OR CLR_CMAKE_TARGET_OS STREQUAL tvOSSimulator) set(CLR_CMAKE_TARGET_UNIX 1) set(CLR_CMAKE_TARGET_TVOS 1) endif(CLR_CMAKE_TARGET_OS STREQUAL tvOS OR CLR_CMAKE_TARGET_OS STREQUAL tvOSSimulator) if(CLR_CMAKE_TARGET_OS STREQUAL FreeBSD) set(CLR_CMAKE_TARGET_UNIX 1) set(CLR_CMAKE_TARGET_FREEBSD 1) endif(CLR_CMAKE_TARGET_OS STREQUAL FreeBSD) if(CLR_CMAKE_TARGET_OS STREQUAL OpenBSD) set(CLR_CMAKE_TARGET_UNIX 1) set(CLR_CMAKE_TARGET_OPENBSD 1) endif(CLR_CMAKE_TARGET_OS STREQUAL OpenBSD) if(CLR_CMAKE_TARGET_OS STREQUAL NetBSD) set(CLR_CMAKE_TARGET_UNIX 1) set(CLR_CMAKE_TARGET_NETBSD 1) endif(CLR_CMAKE_TARGET_OS STREQUAL NetBSD) if(CLR_CMAKE_TARGET_OS STREQUAL SunOS) set(CLR_CMAKE_TARGET_UNIX 1) if(CLR_CMAKE_HOST_OS_ILLUMOS) set(CLR_CMAKE_TARGET_OS_ILLUMOS 1) else(CLR_CMAKE_HOST_OS_ILLUMOS) set(CLR_CMAKE_TARGET_OS_SOLARIS 1) endif(CLR_CMAKE_HOST_OS_ILLUMOS) set(CLR_CMAKE_TARGET_SUNOS 1) endif(CLR_CMAKE_TARGET_OS STREQUAL SunOS) if(CLR_CMAKE_TARGET_OS STREQUAL Emscripten) set(CLR_CMAKE_TARGET_UNIX 1) set(CLR_CMAKE_TARGET_LINUX 1) set(CLR_CMAKE_TARGET_BROWSER 1) endif(CLR_CMAKE_TARGET_OS STREQUAL Emscripten) if(CLR_CMAKE_TARGET_UNIX) if(CLR_CMAKE_TARGET_ARCH STREQUAL x64) set(CLR_CMAKE_TARGET_UNIX_AMD64 1) elseif(CLR_CMAKE_TARGET_ARCH STREQUAL armel) set(CLR_CMAKE_TARGET_UNIX_ARM 1) elseif(CLR_CMAKE_TARGET_ARCH STREQUAL arm) set(CLR_CMAKE_TARGET_UNIX_ARM 1) elseif(CLR_CMAKE_TARGET_ARCH STREQUAL armv6) set(CLR_CMAKE_TARGET_UNIX_ARMV6 1) elseif(CLR_CMAKE_TARGET_ARCH STREQUAL arm64) set(CLR_CMAKE_TARGET_UNIX_ARM64 1) elseif(CLR_CMAKE_TARGET_ARCH STREQUAL loongarch64) set(CLR_CMAKE_TARGET_UNIX_LOONGARCH64 1) elseif(CLR_CMAKE_TARGET_ARCH STREQUAL x86) set(CLR_CMAKE_TARGET_UNIX_X86 1) elseif(CLR_CMAKE_TARGET_ARCH STREQUAL s390x) set(CLR_CMAKE_TARGET_UNIX_S390X 1) elseif(CLR_CMAKE_TARGET_ARCH STREQUAL wasm) set(CLR_CMAKE_TARGET_UNIX_WASM 1) elseif(CLR_CMAKE_TARGET_ARCH STREQUAL mips64) set(CLR_CMAKE_TARGET_UNIX_MIPS64 1) else() clr_unknown_arch() endif() else() 
set(CLR_CMAKE_TARGET_WIN32 1) endif(CLR_CMAKE_TARGET_UNIX) # check if host & target os/arch combination are valid if (CLR_CMAKE_TARGET_OS STREQUAL CLR_CMAKE_HOST_OS) if(NOT(CLR_CMAKE_TARGET_ARCH STREQUAL CLR_CMAKE_HOST_ARCH)) if(NOT((CLR_CMAKE_HOST_ARCH_AMD64 AND CLR_CMAKE_TARGET_ARCH_ARM64) OR (CLR_CMAKE_HOST_ARCH_I386 AND CLR_CMAKE_TARGET_ARCH_ARM) OR (CLR_CMAKE_HOST_ARCH_AMD64 AND CLR_CMAKE_TARGET_ARCH_ARM) OR (CLR_CMAKE_HOST_ARCH_AMD64 AND CLR_CMAKE_TARGET_ARCH_I386))) message(FATAL_ERROR "Invalid platform and target arch combination TARGET_ARCH=${CLR_CMAKE_TARGET_ARCH} HOST_ARCH=${CLR_CMAKE_HOST_ARCH}") endif() endif() else() if(NOT (CLR_CMAKE_HOST_OS STREQUAL windows)) message(FATAL_ERROR "Invalid host and target os/arch combination. Host OS: ${CLR_CMAKE_HOST_OS}") endif() if(NOT (CLR_CMAKE_TARGET_LINUX OR CLR_CMAKE_TARGET_ALPINE_LINUX)) message(FATAL_ERROR "Invalid host and target os/arch combination. Target OS: ${CLR_CMAKE_TARGET_OS}") endif() if(NOT ((CLR_CMAKE_HOST_ARCH_AMD64 AND (CLR_CMAKE_TARGET_ARCH_AMD64 OR CLR_CMAKE_TARGET_ARCH_ARM64)) OR (CLR_CMAKE_HOST_ARCH_I386 AND CLR_CMAKE_TARGET_ARCH_ARM))) message(FATAL_ERROR "Invalid host and target os/arch combination. Host Arch: ${CLR_CMAKE_HOST_ARCH} Target Arch: ${CLR_CMAKE_TARGET_ARCH}") endif() endif() if(NOT CLR_CMAKE_TARGET_BROWSER) # The default linker on Solaris also does not support PIE. if(NOT CLR_CMAKE_TARGET_ANDROID AND NOT CLR_CMAKE_TARGET_SUNOS AND NOT CLR_CMAKE_TARGET_OSX AND NOT CLR_CMAKE_TARGET_MACCATALYST AND NOT CLR_CMAKE_HOST_TVOS AND NOT CLR_CMAKE_HOST_IOS AND NOT MSVC) set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -pie") add_compile_options($<$<STREQUAL:$<TARGET_PROPERTY:TYPE>,EXECUTABLE>:-fPIE>) add_compile_options($<$<STREQUAL:$<TARGET_PROPERTY:TYPE>,SHARED_LIBRARY>:-fPIC>) endif() set(CMAKE_POSITION_INDEPENDENT_CODE ON) endif() string(TOLOWER "${CMAKE_BUILD_TYPE}" LOWERCASE_CMAKE_BUILD_TYPE) if(LOWERCASE_CMAKE_BUILD_TYPE STREQUAL debug) # Clear _FORTIFY_SOURCE=2, if set string(REPLACE "-D_FORTIFY_SOURCE=2 " "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") string(REPLACE "-D_FORTIFY_SOURCE=2 " "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}") endif()
1
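For orientation: in the configureplatform.cmake record above, the only functional difference between the before and after contents is that the `armv7l` branch of the `CMAKE_SYSTEM_PROCESSOR` check now also accepts `armv8l`, so a 32-bit userspace running on a 64-bit ARM core is configured as a plain `arm` host. The bash sketch below is only an illustration of that mapping, not the CMake code itself; the helper name `map_host_arch` and the sample processor strings are illustrative assumptions.

```bash
#!/usr/bin/env bash
# Illustration only: mimic how configureplatform.cmake buckets CMAKE_SYSTEM_PROCESSOR.
map_host_arch() {
    case "$1" in
        x86_64|amd64)    echo "x64" ;;
        armv7l|armv8l)   echo "arm" ;;    # armv8l (32-bit userspace on ARMv8) now joins armv7l
        armv6|armv6l)    echo "armv6" ;;
        aarch64|arm64)   echo "arm64" ;;
        i686|x86)        echo "x86" ;;
        *)               echo "unknown" ;;
    esac
}

# Sample inputs, as `uname -m` might report them:
for cpu in armv7l armv8l aarch64; do
    printf '%s -> %s\n' "$cpu" "$(map_host_arch "$cpu")"
done
```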
dotnet/runtime
66,477
Add armv8l virtualized 32-bit ARM core detection
## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse the architecture as `arm` when the output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812). Arm support on source-build is tracked at https://github.com/dotnet/installer/pull/13378. Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782.
ayakael
2022-03-10T21:05:11Z
2022-03-11T09:29:12Z
f82fe1d83f1a0e22025e186fdc6f4d86de0a83a1
12816261feee53cc57fab17f088ee6544ef6ffdb
Add armv8l virtualized 32-bit ARM core detection. ## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse the architecture as `arm` when the output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812). Arm support on source-build is tracked at https://github.com/dotnet/installer/pull/13378. Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782.
./eng/native/configuretools.cmake
include(${CMAKE_CURRENT_LIST_DIR}/configureplatform.cmake) # Get the version of the compiler that is in the file name for tool location. set (CLR_CMAKE_COMPILER_FILE_NAME_VERSION "") if (CMAKE_C_COMPILER MATCHES "-?[0-9]+(\.[0-9]+)?$") set(CLR_CMAKE_COMPILER_FILE_NAME_VERSION "${CMAKE_MATCH_0}") endif() if(NOT WIN32 AND NOT CLR_CMAKE_TARGET_BROWSER) if(CMAKE_C_COMPILER_ID MATCHES "Clang") if(APPLE) set(TOOLSET_PREFIX "") else() set(TOOLSET_PREFIX "llvm-") endif() elseif(CMAKE_C_COMPILER_ID MATCHES "GNU") if(CMAKE_CROSSCOMPILING) set(TOOLSET_PREFIX "${CMAKE_C_COMPILER_TARGET}-") else() set(TOOLSET_PREFIX "") endif() endif() function(locate_toolchain_exec exec var) string(TOUPPER ${exec} EXEC_UPPERCASE) if(NOT "$ENV{CLR_${EXEC_UPPERCASE}}" STREQUAL "") set(${var} "$ENV{CLR_${EXEC_UPPERCASE}}" PARENT_SCOPE) return() endif() find_program(EXEC_LOCATION_${exec} NAMES "${TOOLSET_PREFIX}${exec}${CLR_CMAKE_COMPILER_FILE_NAME_VERSION}" "${TOOLSET_PREFIX}${exec}") if (EXEC_LOCATION_${exec} STREQUAL "EXEC_LOCATION_${exec}-NOTFOUND") message(FATAL_ERROR "Unable to find toolchain executable. Name: ${exec}, Prefix: ${TOOLSET_PREFIX}.") endif() set(${var} ${EXEC_LOCATION_${exec}} PARENT_SCOPE) endfunction() locate_toolchain_exec(ar CMAKE_AR) locate_toolchain_exec(nm CMAKE_NM) locate_toolchain_exec(ranlib CMAKE_RANLIB) if(CMAKE_C_COMPILER_ID MATCHES "Clang") locate_toolchain_exec(link CMAKE_LINKER) endif() if(NOT CLR_CMAKE_TARGET_OSX AND NOT CLR_CMAKE_TARGET_MACCATALYST AND NOT CLR_CMAKE_TARGET_IOS AND NOT CLR_CMAKE_TARGET_TVOS AND (NOT CLR_CMAKE_TARGET_ANDROID OR CROSS_ROOTFS)) locate_toolchain_exec(objdump CMAKE_OBJDUMP) if(CLR_CMAKE_TARGET_ANDROID) set(TOOLSET_PREFIX ${ANDROID_TOOLCHAIN_PREFIX}) elseif(CMAKE_CROSSCOMPILING AND NOT DEFINED CLR_CROSS_COMPONENTS_BUILD AND CMAKE_SYSTEM_PROCESSOR MATCHES "^(armv7l|armv6l|aarch64|arm|s390x)$") set(TOOLSET_PREFIX "${TOOLCHAIN}-") else() set(TOOLSET_PREFIX "") endif() locate_toolchain_exec(objcopy CMAKE_OBJCOPY) endif() endif() if (NOT CLR_CMAKE_HOST_WIN32) # detect linker separate_arguments(ldVersion UNIX_COMMAND "${CMAKE_C_COMPILER} ${CMAKE_SHARED_LINKER_FLAGS} -Wl,--version") execute_process(COMMAND ${ldVersion} ERROR_QUIET OUTPUT_VARIABLE ldVersionOutput) if("${ldVersionOutput}" MATCHES "LLD") set(LD_LLVM 1) elseif("${ldVersionOutput}" MATCHES "GNU ld" OR "${ldVersionOutput}" MATCHES "GNU gold" OR "${ldVersionOutput}" MATCHES "GNU linkers") set(LD_GNU 1) elseif("${ldVersionOutput}" MATCHES "Solaris Link") set(LD_SOLARIS 1) else(CLR_CMAKE_HOST_OSX OR CLR_CMAKE_HOST_MACCATALYST) set(LD_OSX 1) endif() endif()
include(${CMAKE_CURRENT_LIST_DIR}/configureplatform.cmake) # Get the version of the compiler that is in the file name for tool location. set (CLR_CMAKE_COMPILER_FILE_NAME_VERSION "") if (CMAKE_C_COMPILER MATCHES "-?[0-9]+(\.[0-9]+)?$") set(CLR_CMAKE_COMPILER_FILE_NAME_VERSION "${CMAKE_MATCH_0}") endif() if(NOT WIN32 AND NOT CLR_CMAKE_TARGET_BROWSER) if(CMAKE_C_COMPILER_ID MATCHES "Clang") if(APPLE) set(TOOLSET_PREFIX "") else() set(TOOLSET_PREFIX "llvm-") endif() elseif(CMAKE_C_COMPILER_ID MATCHES "GNU") if(CMAKE_CROSSCOMPILING) set(TOOLSET_PREFIX "${CMAKE_C_COMPILER_TARGET}-") else() set(TOOLSET_PREFIX "") endif() endif() function(locate_toolchain_exec exec var) string(TOUPPER ${exec} EXEC_UPPERCASE) if(NOT "$ENV{CLR_${EXEC_UPPERCASE}}" STREQUAL "") set(${var} "$ENV{CLR_${EXEC_UPPERCASE}}" PARENT_SCOPE) return() endif() find_program(EXEC_LOCATION_${exec} NAMES "${TOOLSET_PREFIX}${exec}${CLR_CMAKE_COMPILER_FILE_NAME_VERSION}" "${TOOLSET_PREFIX}${exec}") if (EXEC_LOCATION_${exec} STREQUAL "EXEC_LOCATION_${exec}-NOTFOUND") message(FATAL_ERROR "Unable to find toolchain executable. Name: ${exec}, Prefix: ${TOOLSET_PREFIX}.") endif() set(${var} ${EXEC_LOCATION_${exec}} PARENT_SCOPE) endfunction() locate_toolchain_exec(ar CMAKE_AR) locate_toolchain_exec(nm CMAKE_NM) locate_toolchain_exec(ranlib CMAKE_RANLIB) if(CMAKE_C_COMPILER_ID MATCHES "Clang") locate_toolchain_exec(link CMAKE_LINKER) endif() if(NOT CLR_CMAKE_TARGET_OSX AND NOT CLR_CMAKE_TARGET_MACCATALYST AND NOT CLR_CMAKE_TARGET_IOS AND NOT CLR_CMAKE_TARGET_TVOS AND (NOT CLR_CMAKE_TARGET_ANDROID OR CROSS_ROOTFS)) locate_toolchain_exec(objdump CMAKE_OBJDUMP) if(CLR_CMAKE_TARGET_ANDROID) set(TOOLSET_PREFIX ${ANDROID_TOOLCHAIN_PREFIX}) elseif(CMAKE_CROSSCOMPILING AND NOT DEFINED CLR_CROSS_COMPONENTS_BUILD AND CMAKE_SYSTEM_PROCESSOR MATCHES "^(armv8l|armv7l|armv6l|aarch64|arm|s390x)$") set(TOOLSET_PREFIX "${TOOLCHAIN}-") else() set(TOOLSET_PREFIX "") endif() locate_toolchain_exec(objcopy CMAKE_OBJCOPY) endif() endif() if (NOT CLR_CMAKE_HOST_WIN32) # detect linker separate_arguments(ldVersion UNIX_COMMAND "${CMAKE_C_COMPILER} ${CMAKE_SHARED_LINKER_FLAGS} -Wl,--version") execute_process(COMMAND ${ldVersion} ERROR_QUIET OUTPUT_VARIABLE ldVersionOutput) if("${ldVersionOutput}" MATCHES "LLD") set(LD_LLVM 1) elseif("${ldVersionOutput}" MATCHES "GNU ld" OR "${ldVersionOutput}" MATCHES "GNU gold" OR "${ldVersionOutput}" MATCHES "GNU linkers") set(LD_GNU 1) elseif("${ldVersionOutput}" MATCHES "Solaris Link") set(LD_SOLARIS 1) else(CLR_CMAKE_HOST_OSX OR CLR_CMAKE_HOST_MACCATALYST) set(LD_OSX 1) endif() endif()
1
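In the configuretools.cmake record above, the cross-compile processor list used when locating `objcopy` gains `armv8l`, turning the match expression into `^(armv8l|armv7l|armv6l|aarch64|arm|s390x)$`. The following bash sketch only demonstrates how that regular expression classifies a few processor strings; the helper name `needs_toolchain_prefix` and the sample inputs are assumptions made for illustration.

```bash
#!/usr/bin/env bash
# Illustration only: classify processor strings with the same extended regex that
# configuretools.cmake now uses to decide whether prefixed binutils are required.
needs_toolchain_prefix() {
    [[ $1 =~ ^(armv8l|armv7l|armv6l|aarch64|arm|s390x)$ ]]
}

for cpu in armv8l armv7l x86_64; do
    if needs_toolchain_prefix "$cpu"; then
        echo "$cpu: cross toolchain prefix would be applied"
    else
        echo "$cpu: no prefix needed"
    fi
done
```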
dotnet/runtime
66,477
Add armv8l virtualized 32-bit ARM core detection
## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse the architecture as `arm` when the output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812). Arm support on source-build is tracked at https://github.com/dotnet/installer/pull/13378. Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782.
ayakael
2022-03-10T21:05:11Z
2022-03-11T09:29:12Z
f82fe1d83f1a0e22025e186fdc6f4d86de0a83a1
12816261feee53cc57fab17f088ee6544ef6ffdb
Add armv8l virtualized 32-bit ARM core detection. ## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse the architecture as `arm` when the output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812). Arm support on source-build is tracked at https://github.com/dotnet/installer/pull/13378. Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782.
./eng/native/init-os-and-arch.sh
#!/usr/bin/env bash # Use uname to determine what the OS is. OSName=$(uname -s) if command -v getprop && getprop ro.product.system.model 2>&1 | grep -qi android; then OSName="Android" fi case "$OSName" in FreeBSD|Linux|NetBSD|OpenBSD|SunOS|Android) os="$OSName" ;; Darwin) os=OSX ;; *) echo "Unsupported OS $OSName detected, configuring as if for Linux" os=Linux ;; esac # On Solaris, `uname -m` is discouraged, see https://docs.oracle.com/cd/E36784_01/html/E36870/uname-1.html # and `uname -p` returns processor type (e.g. i386 on amd64). # The appropriate tool to determine CPU is isainfo(1) https://docs.oracle.com/cd/E36784_01/html/E36870/isainfo-1.html. if [ "$os" = "SunOS" ]; then if uname -o 2>&1 | grep -q illumos; then os="illumos" else os="Solaris" fi CPUName=$(isainfo -n) else # For the rest of the operating systems, use uname(1) to determine what the CPU is. CPUName=$(uname -m) fi case "$CPUName" in arm64|aarch64) arch=arm64 ;; loongarch64) arch=loongarch64 ;; amd64|x86_64) arch=x64 ;; armv7l) if (NAME=""; . /etc/os-release; test "$NAME" = "Tizen"); then arch=armel else arch=arm fi ;; armv6l) arch=armv6 ;; i[3-6]86) echo "Unsupported CPU $CPUName detected, build might not succeed!" arch=x86 ;; s390x) arch=s390x ;; *) echo "Unknown CPU $CPUName detected, configuring as if for x64" arch=x64 ;; esac
#!/usr/bin/env bash # Use uname to determine what the OS is. OSName=$(uname -s) if command -v getprop && getprop ro.product.system.model 2>&1 | grep -qi android; then OSName="Android" fi case "$OSName" in FreeBSD|Linux|NetBSD|OpenBSD|SunOS|Android) os="$OSName" ;; Darwin) os=OSX ;; *) echo "Unsupported OS $OSName detected, configuring as if for Linux" os=Linux ;; esac # On Solaris, `uname -m` is discouraged, see https://docs.oracle.com/cd/E36784_01/html/E36870/uname-1.html # and `uname -p` returns processor type (e.g. i386 on amd64). # The appropriate tool to determine CPU is isainfo(1) https://docs.oracle.com/cd/E36784_01/html/E36870/isainfo-1.html. if [ "$os" = "SunOS" ]; then if uname -o 2>&1 | grep -q illumos; then os="illumos" else os="Solaris" fi CPUName=$(isainfo -n) else # For the rest of the operating systems, use uname(1) to determine what the CPU is. CPUName=$(uname -m) fi case "$CPUName" in arm64|aarch64) arch=arm64 ;; loongarch64) arch=loongarch64 ;; amd64|x86_64) arch=x64 ;; armv7l|armv8l) if (NAME=""; . /etc/os-release; test "$NAME" = "Tizen"); then arch=armel else arch=arm fi ;; armv6l) arch=armv6 ;; i[3-6]86) echo "Unsupported CPU $CPUName detected, build might not succeed!" arch=x86 ;; s390x) arch=s390x ;; *) echo "Unknown CPU $CPUName detected, configuring as if for x64" arch=x64 ;; esac
1
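The init-os-and-arch.sh record above extends the shell `case` pattern from `armv7l)` to `armv7l|armv8l)`, keeping the existing Tizen check that selects the soft-float `armel` architecture. Below is a minimal, self-contained sketch of that branch; `detect_arch` is a hypothetical helper that takes the distribution name as a parameter instead of sourcing `/etc/os-release`, a simplification made purely so the example runs on its own.

```bash
#!/usr/bin/env bash
# Illustration only: the armv7l/armv8l branch of init-os-and-arch.sh, with the
# /etc/os-release lookup replaced by an explicit parameter.
detect_arch() {
    local cpu="$1" distro_name="$2"
    case "$cpu" in
        armv7l|armv8l)
            if [ "$distro_name" = "Tizen" ]; then
                echo "armel"   # Tizen builds use the soft-float ABI
            else
                echo "arm"     # everything else is plain 32-bit arm
            fi
            ;;
        *)
            echo "unhandled in this sketch"
            ;;
    esac
}

detect_arch armv8l "Alpine Linux"   # -> arm
detect_arch armv8l "Tizen"          # -> armel
```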
dotnet/runtime
66,477
Add armv8l virtualized 32-bit ARM core detection
## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse the architecture as `arm` when the output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812). Arm support on source-build is tracked at https://github.com/dotnet/installer/pull/13378. Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782.
ayakael
2022-03-10T21:05:11Z
2022-03-11T09:29:12Z
f82fe1d83f1a0e22025e186fdc6f4d86de0a83a1
12816261feee53cc57fab17f088ee6544ef6ffdb
Add armv8l virtualized 32-bit ARM core detection. ## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse the architecture as `arm` when the output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812). Arm support on source-build is tracked at https://github.com/dotnet/installer/pull/13378. Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782.
./src/mono/mono/arch/arm/dpiops.sh
#!/bin/sh OPCODES="AND EOR SUB RSB ADD ADC SBC RSC ORR BIC" CMP_OPCODES="TST TEQ CMP CMN" MOV_OPCODES="MOV MVN" # $1: opcode list # $2: template gen() { for i in $1; do sed "s/<Op>/$i/g" $2.th done } echo -e "/* Macros for DPI ops, auto-generated from template */\n" echo -e "\n/* mov/mvn */\n" gen "$MOV_OPCODES" mov_macros echo -e "\n/* DPIs, arithmetic and logical */\n" gen "$OPCODES" dpi_macros echo -e "\n\n" echo -e "\n/* DPIs, comparison */\n" gen "$CMP_OPCODES" cmp_macros echo -e "\n/* end generated */\n"
#!/bin/sh OPCODES="AND EOR SUB RSB ADD ADC SBC RSC ORR BIC" CMP_OPCODES="TST TEQ CMP CMN" MOV_OPCODES="MOV MVN" # $1: opcode list # $2: template gen() { for i in $1; do sed "s/<Op>/$i/g" $2.th done } echo -e "/* Macros for DPI ops, auto-generated from template */\n" echo -e "\n/* mov/mvn */\n" gen "$MOV_OPCODES" mov_macros echo -e "\n/* DPIs, arithmetic and logical */\n" gen "$OPCODES" dpi_macros echo -e "\n\n" echo -e "\n/* DPIs, comparison */\n" gen "$CMP_OPCODES" cmp_macros echo -e "\n/* end generated */\n"
-1
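The dpiops.sh record above is not modified by the pull request (its before and after contents are identical); it is a small generator that expands opcode macro templates by substituting `<Op>` with each opcode name via `sed`. The sketch below shows only that substitution mechanic: the inline template string is a made-up stand-in for the real mov_macros.th, dpi_macros.th and cmp_macros.th files, which are not part of this record.

```bash
#!/usr/bin/env bash
# Illustration only: dpiops.sh-style template expansion, with an inline template
# standing in for the *.th files so the sketch runs on its own.
template='#define ARM_<Op>_REG_REG(p, rd, rn, rm) /* emit <Op> rd, rn, rm */'

gen() {
    local op
    for op in $1; do
        # Same idea as `sed "s/<Op>/$i/g" $2.th` in the real script.
        printf '%s\n' "$template" | sed "s/<Op>/$op/g"
    done
}

gen "AND EOR SUB"
```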
dotnet/runtime
66,477
Add armv8l virtualized 32-bit ARM core detection
## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse the architecture as `arm` when the output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812). Arm support on source-build is tracked at https://github.com/dotnet/installer/pull/13378. Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782.
ayakael
2022-03-10T21:05:11Z
2022-03-11T09:29:12Z
f82fe1d83f1a0e22025e186fdc6f4d86de0a83a1
12816261feee53cc57fab17f088ee6544ef6ffdb
Add armv8l virtualized 32-bit ARM core detection. ## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse the architecture as `arm` when the output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812). Arm support on source-build is tracked at https://github.com/dotnet/installer/pull/13378. Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782.
./src/tests/run.sh
#!/usr/bin/env bash function print_usage { echo '' echo 'CoreCLR test runner script.' echo '' echo 'Typical command line:' echo '' echo 'src/tests/run.sh <options>' echo '' echo 'Optional arguments:' echo ' -h|--help : Show usage information.' echo ' -v, --verbose : Show output from each test.' echo ' <arch> : One of x64, x86, arm, arm64, wasm. Defaults to current architecture.' echo ' Android : Set build OS to Android.' echo ' --test-env=<path> : Script to set environment variables for tests' echo ' --testRootDir=<path> : Root directory of the test build (e.g. runtime/artifacts/tests/windows.x64.Debug).' echo ' --disableEventLogging : Disable the events logged by both VM and Managed Code' echo ' --sequential : Run tests sequentially (default is to run in parallel).' echo ' --runcrossgen2tests : Runs the ReadyToRun tests compiled with Crossgen2' echo ' --jitstress=<n> : Runs the tests with COMPlus_JitStress=n' echo ' --jitstressregs=<n> : Runs the tests with COMPlus_JitStressRegs=n' echo ' --jitminopts : Runs the tests with COMPlus_JITMinOpts=1' echo ' --jitforcerelocs : Runs the tests with COMPlus_ForceRelocs=1' echo ' --gcname=<n> : Runs the tests with COMPlus_GCName=n' echo ' --gcstresslevel=<n> : Runs the tests with COMPlus_GCStress=n' echo ' 0: None 1: GC on all allocs and '"'easy'"' places' echo ' 2: GC on transitions to preemptive GC 4: GC on every allowable JITed instr' echo ' 8: GC on every allowable NGEN instr 16: GC only on a unique stack trace' echo ' --gcsimulator : Runs the GCSimulator tests' echo ' --long-gc : Runs the long GC tests' echo ' --useServerGC : Enable server GC for this test run' echo ' --ilasmroundtrip : Runs ilasm round trip on the tests' echo ' --link <ILlink> : Runs the tests after linking via ILlink' echo ' --printLastResultsOnly : Print the results of the last run' echo ' --runincontext : Run each tests in an unloadable AssemblyLoadContext' echo ' --tieringtest : Run each test to encourage tier1 rejitting' echo ' --runnativeaottests : Run NativeAOT compiled tests' echo ' --limitedDumpGeneration : ' } function check_cpu_architecture { local CPUName=$(uname -m) local __arch= if [[ "$(uname -s)" == "SunOS" ]]; then CPUName=$(isainfo -n) fi case $CPUName in i686) __arch=x86 ;; amd64|x86_64) __arch=x64 ;; armv7l) __arch=arm ;; aarch64|arm64) __arch=arm64 ;; *) echo "Unknown CPU $CPUName detected, configuring as if for x64" __arch=x64 ;; esac echo "$__arch" } ################################################################################ # Handle Arguments ################################################################################ ARCH=$(check_cpu_architecture) # Exit code constants readonly EXIT_CODE_SUCCESS=0 # Script ran normally. readonly EXIT_CODE_EXCEPTION=1 # Script exited because something exceptional happened (e.g. bad arguments, Ctrl-C interrupt). readonly EXIT_CODE_TEST_FAILURE=2 # Script completed successfully, but one or more tests failed. 
# Argument variables buildArch=$ARCH buildOS= buildConfiguration="Debug" testRootDir= testEnv= gcsimulator= longgc= limitedCoreDumps= ((disableEventLogging = 0)) ((serverGC = 0)) # Handle arguments verbose=0 ilasmroundtrip= printLastResultsOnly= runSequential=0 runincontext=0 tieringtest=0 nativeaottest=0 for i in "$@" do case $i in -h|--help) print_usage exit $EXIT_CODE_SUCCESS ;; -v|--verbose) verbose=1 ;; x64) buildArch="x64" ;; x86) buildArch="x86" ;; arm) buildArch="arm" ;; arm64) buildArch="arm64" ;; wasm) buildArch="wasm" ;; Android) buildOS="Android" ;; debug|Debug) buildConfiguration="Debug" ;; checked|Checked) buildConfiguration="Checked" ;; release|Release) buildConfiguration="Release" ;; --printLastResultsOnly) printLastResultsOnly=1 ;; --jitstress=*) export COMPlus_JitStress=${i#*=} ;; --jitstressregs=*) export COMPlus_JitStressRegs=${i#*=} ;; --jitminopts) export COMPlus_JITMinOpts=1 ;; --jitforcerelocs) export COMPlus_ForceRelocs=1 ;; --link=*) export ILLINK=${i#*=} export DoLink=true ;; --ilasmroundtrip) ((ilasmroundtrip = 1)) ;; --testRootDir=*) testRootDir=${i#*=} ;; --disableEventLogging) ((disableEventLogging = 1)) ;; --runcrossgen2tests) export RunCrossGen2=1 ;; --sequential) runSequential=1 ;; --useServerGC) ((serverGC = 1)) ;; --long-gc) ((longgc = 1)) ;; --gcsimulator) ((gcsimulator = 1)) ;; --test-env=*) testEnv=${i#*=} ;; --gcstresslevel=*) export COMPlus_GCStress=${i#*=} ;; --gcname=*) export COMPlus_GCName=${i#*=} ;; --limitedDumpGeneration) limitedCoreDumps=ON ;; --runincontext) runincontext=1 ;; --tieringtest) tieringtest=1 ;; --runnativeaottests) nativeaottest=1 ;; *) echo "Unknown switch: $i" print_usage exit $EXIT_CODE_SUCCESS ;; esac done ################################################################################ # Set environment variables affecting tests. # (These should be run.py arguments.) ################################################################################ if ((disableEventLogging == 0)); then export COMPlus_EnableEventLog=1 fi if ((serverGC != 0)); then export COMPlus_gcServer="$serverGC" fi ################################################################################ # Call run.py to run tests. ################################################################################ runtestPyArguments=("-arch" "${buildArch}" "-build_type" "${buildConfiguration}") scriptPath="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )" repoRootDir=$scriptPath/../.. echo "Build Architecture : ${buildArch}" echo "Build Configuration : ${buildConfiguration}" if [ "$buildArch" = "wasm" ]; then runtestPyArguments+=("-os" "Browser") fi if [ "$buildOS" = "Android" ]; then runtestPyArguments+=("-os" "Android") fi if [[ -n "$testRootDir" ]]; then runtestPyArguments+=("-test_location" "$testRootDir") echo "Test Location : ${testRootDir}" fi if [[ -n "${testEnv}" ]]; then runtestPyArguments+=("-test_env" "${testEnv}") echo "Test Env : ${testEnv}" fi echo "" if [[ -n "$longgc" ]]; then echo "Running Long GC tests" runtestPyArguments+=("--long_gc") fi if [[ -n "$gcsimulator" ]]; then echo "Running GC simulator tests" runtestPyArguments+=("--gcsimulator") fi if [[ -n "$ilasmroundtrip" ]]; then echo "Running Ilasm round trip" runtestPyArguments+=("--ilasmroundtrip") fi if (($verbose!=0)); then runtestPyArguments+=("--verbose") fi if [ "$runSequential" -ne 0 ]; then echo "Run tests sequentially." 
runtestPyArguments+=("--sequential") fi if [[ -n "$printLastResultsOnly" ]]; then runtestPyArguments+=("--analyze_results_only") fi if [[ -n "$RunCrossGen2" ]]; then runtestPyArguments+=("--run_crossgen2_tests") fi if [[ "$limitedCoreDumps" == "ON" ]]; then runtestPyArguments+=("--limited_core_dumps") fi if [[ "$runincontext" -ne 0 ]]; then echo "Running in an unloadable AssemblyLoadContext" runtestPyArguments+=("--run_in_context") fi if [[ "$tieringtest" -ne 0 ]]; then echo "Running to encourage tier1 rejitting" runtestPyArguments+=("--tieringtest") fi if [[ "$nativeaottest" -ne 0 ]]; then echo "Running NativeAOT compiled tests" runtestPyArguments+=("--run_nativeaot_tests") fi # Default to python3 if it is installed __Python=python if command -v python3 &>/dev/null; then __Python=python3 fi # Run the tests using cross platform run.py echo "$__Python $repoRootDir/src/tests/run.py ${runtestPyArguments[@]}" $__Python "$repoRootDir/src/tests/run.py" "${runtestPyArguments[@]}" exit "$?"
#!/usr/bin/env bash function print_usage { echo '' echo 'CoreCLR test runner script.' echo '' echo 'Typical command line:' echo '' echo 'src/tests/run.sh <options>' echo '' echo 'Optional arguments:' echo ' -h|--help : Show usage information.' echo ' -v, --verbose : Show output from each test.' echo ' <arch> : One of x64, x86, arm, arm64, wasm. Defaults to current architecture.' echo ' Android : Set build OS to Android.' echo ' --test-env=<path> : Script to set environment variables for tests' echo ' --testRootDir=<path> : Root directory of the test build (e.g. runtime/artifacts/tests/windows.x64.Debug).' echo ' --disableEventLogging : Disable the events logged by both VM and Managed Code' echo ' --sequential : Run tests sequentially (default is to run in parallel).' echo ' --runcrossgen2tests : Runs the ReadyToRun tests compiled with Crossgen2' echo ' --jitstress=<n> : Runs the tests with COMPlus_JitStress=n' echo ' --jitstressregs=<n> : Runs the tests with COMPlus_JitStressRegs=n' echo ' --jitminopts : Runs the tests with COMPlus_JITMinOpts=1' echo ' --jitforcerelocs : Runs the tests with COMPlus_ForceRelocs=1' echo ' --gcname=<n> : Runs the tests with COMPlus_GCName=n' echo ' --gcstresslevel=<n> : Runs the tests with COMPlus_GCStress=n' echo ' 0: None 1: GC on all allocs and '"'easy'"' places' echo ' 2: GC on transitions to preemptive GC 4: GC on every allowable JITed instr' echo ' 8: GC on every allowable NGEN instr 16: GC only on a unique stack trace' echo ' --gcsimulator : Runs the GCSimulator tests' echo ' --long-gc : Runs the long GC tests' echo ' --useServerGC : Enable server GC for this test run' echo ' --ilasmroundtrip : Runs ilasm round trip on the tests' echo ' --link <ILlink> : Runs the tests after linking via ILlink' echo ' --printLastResultsOnly : Print the results of the last run' echo ' --runincontext : Run each tests in an unloadable AssemblyLoadContext' echo ' --tieringtest : Run each test to encourage tier1 rejitting' echo ' --runnativeaottests : Run NativeAOT compiled tests' echo ' --limitedDumpGeneration : ' } function check_cpu_architecture { local CPUName=$(uname -m) local __arch= if [[ "$(uname -s)" == "SunOS" ]]; then CPUName=$(isainfo -n) fi case $CPUName in i686) __arch=x86 ;; amd64|x86_64) __arch=x64 ;; armv7l) __arch=arm ;; aarch64|arm64) __arch=arm64 ;; *) echo "Unknown CPU $CPUName detected, configuring as if for x64" __arch=x64 ;; esac echo "$__arch" } ################################################################################ # Handle Arguments ################################################################################ ARCH=$(check_cpu_architecture) # Exit code constants readonly EXIT_CODE_SUCCESS=0 # Script ran normally. readonly EXIT_CODE_EXCEPTION=1 # Script exited because something exceptional happened (e.g. bad arguments, Ctrl-C interrupt). readonly EXIT_CODE_TEST_FAILURE=2 # Script completed successfully, but one or more tests failed. 
# Argument variables buildArch=$ARCH buildOS= buildConfiguration="Debug" testRootDir= testEnv= gcsimulator= longgc= limitedCoreDumps= ((disableEventLogging = 0)) ((serverGC = 0)) # Handle arguments verbose=0 ilasmroundtrip= printLastResultsOnly= runSequential=0 runincontext=0 tieringtest=0 nativeaottest=0 for i in "$@" do case $i in -h|--help) print_usage exit $EXIT_CODE_SUCCESS ;; -v|--verbose) verbose=1 ;; x64) buildArch="x64" ;; x86) buildArch="x86" ;; arm) buildArch="arm" ;; arm64) buildArch="arm64" ;; wasm) buildArch="wasm" ;; Android) buildOS="Android" ;; debug|Debug) buildConfiguration="Debug" ;; checked|Checked) buildConfiguration="Checked" ;; release|Release) buildConfiguration="Release" ;; --printLastResultsOnly) printLastResultsOnly=1 ;; --jitstress=*) export COMPlus_JitStress=${i#*=} ;; --jitstressregs=*) export COMPlus_JitStressRegs=${i#*=} ;; --jitminopts) export COMPlus_JITMinOpts=1 ;; --jitforcerelocs) export COMPlus_ForceRelocs=1 ;; --link=*) export ILLINK=${i#*=} export DoLink=true ;; --ilasmroundtrip) ((ilasmroundtrip = 1)) ;; --testRootDir=*) testRootDir=${i#*=} ;; --disableEventLogging) ((disableEventLogging = 1)) ;; --runcrossgen2tests) export RunCrossGen2=1 ;; --sequential) runSequential=1 ;; --useServerGC) ((serverGC = 1)) ;; --long-gc) ((longgc = 1)) ;; --gcsimulator) ((gcsimulator = 1)) ;; --test-env=*) testEnv=${i#*=} ;; --gcstresslevel=*) export COMPlus_GCStress=${i#*=} ;; --gcname=*) export COMPlus_GCName=${i#*=} ;; --limitedDumpGeneration) limitedCoreDumps=ON ;; --runincontext) runincontext=1 ;; --tieringtest) tieringtest=1 ;; --runnativeaottests) nativeaottest=1 ;; *) echo "Unknown switch: $i" print_usage exit $EXIT_CODE_SUCCESS ;; esac done ################################################################################ # Set environment variables affecting tests. # (These should be run.py arguments.) ################################################################################ if ((disableEventLogging == 0)); then export COMPlus_EnableEventLog=1 fi if ((serverGC != 0)); then export COMPlus_gcServer="$serverGC" fi ################################################################################ # Call run.py to run tests. ################################################################################ runtestPyArguments=("-arch" "${buildArch}" "-build_type" "${buildConfiguration}") scriptPath="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )" repoRootDir=$scriptPath/../.. echo "Build Architecture : ${buildArch}" echo "Build Configuration : ${buildConfiguration}" if [ "$buildArch" = "wasm" ]; then runtestPyArguments+=("-os" "Browser") fi if [ "$buildOS" = "Android" ]; then runtestPyArguments+=("-os" "Android") fi if [[ -n "$testRootDir" ]]; then runtestPyArguments+=("-test_location" "$testRootDir") echo "Test Location : ${testRootDir}" fi if [[ -n "${testEnv}" ]]; then runtestPyArguments+=("-test_env" "${testEnv}") echo "Test Env : ${testEnv}" fi echo "" if [[ -n "$longgc" ]]; then echo "Running Long GC tests" runtestPyArguments+=("--long_gc") fi if [[ -n "$gcsimulator" ]]; then echo "Running GC simulator tests" runtestPyArguments+=("--gcsimulator") fi if [[ -n "$ilasmroundtrip" ]]; then echo "Running Ilasm round trip" runtestPyArguments+=("--ilasmroundtrip") fi if (($verbose!=0)); then runtestPyArguments+=("--verbose") fi if [ "$runSequential" -ne 0 ]; then echo "Run tests sequentially." 
runtestPyArguments+=("--sequential") fi if [[ -n "$printLastResultsOnly" ]]; then runtestPyArguments+=("--analyze_results_only") fi if [[ -n "$RunCrossGen2" ]]; then runtestPyArguments+=("--run_crossgen2_tests") fi if [[ "$limitedCoreDumps" == "ON" ]]; then runtestPyArguments+=("--limited_core_dumps") fi if [[ "$runincontext" -ne 0 ]]; then echo "Running in an unloadable AssemblyLoadContext" runtestPyArguments+=("--run_in_context") fi if [[ "$tieringtest" -ne 0 ]]; then echo "Running to encourage tier1 rejitting" runtestPyArguments+=("--tieringtest") fi if [[ "$nativeaottest" -ne 0 ]]; then echo "Running NativeAOT compiled tests" runtestPyArguments+=("--run_nativeaot_tests") fi # Default to python3 if it is installed __Python=python if command -v python3 &>/dev/null; then __Python=python3 fi # Run the tests using cross platform run.py echo "$__Python $repoRootDir/src/tests/run.py ${runtestPyArguments[@]}" $__Python "$repoRootDir/src/tests/run.py" "${runtestPyArguments[@]}" exit "$?"
-1
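The record above pairs the armv8l detection PR with `./src/tests/run.sh`, whose `check_cpu_architecture` helper only matches `armv7l`; its stored before/after content is identical, so the merged diff is not shown here. The following is a minimal hedged sketch of how such a `uname -m` case statement is typically extended so that `armv8l` (a 32-bit userspace on a 64-bit ARM core) maps to `arm`. The function name mirrors the script above, but the block is an illustration of the pattern the description names, not the actual patch.

```bash
#!/usr/bin/env bash
# Hedged sketch: map `uname -m` output to a build architecture, treating
# armv8l (32-bit userspace on a 64-bit ARM core) the same as armv7l.
check_cpu_architecture() {
    local CPUName
    CPUName=$(uname -m)
    case $CPUName in
        i686)          echo "x86"   ;;
        amd64|x86_64)  echo "x64"   ;;
        armv7l|armv8l) echo "arm"   ;;  # armv8l handled alongside armv7l
        aarch64|arm64) echo "arm64" ;;
        *)             echo "x64"   ;;  # unknown CPUs fall back to x64
    esac
}

check_cpu_architecture
```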
dotnet/runtime
66,477
Add armv8l virtualized 32-bit ARM core detection
## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse `armv8l` (the output of `uname -m` on such cores) as `arm`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812). ARM support on the source-build side: https://github.com/dotnet/installer/pull/13378. Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782
ayakael
2022-03-10T21:05:11Z
2022-03-11T09:29:12Z
f82fe1d83f1a0e22025e186fdc6f4d86de0a83a1
12816261feee53cc57fab17f088ee6544ef6ffdb
Add armv8l virtualized 32-bit ARM core detection. ## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse `armv8l` (the output of `uname -m` on such cores) as `arm`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812). ARM support on the source-build side: https://github.com/dotnet/installer/pull/13378. Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782
./src/mono/mono/tests/verifier/make_ldobj_test.sh
#! /bin/sh SED="sed" if [ `which gsed 2> /dev/null` ]; then SED="gsed" fi TEST_NAME=$1 TEST_VALIDITY=$2 TEST_TYPE1=$3 TEST_TYPE2=$4 ZZ=`echo $TEST_TYPE1 | grep "\&"` T1_REF=$? LOCAL_INIT=""; if [ $T1_REF -eq 0 ]; then T1_NO_REF=`echo $TEST_TYPE1 | cut -d '\' -f 1` INIT_LOCS=", $T1_NO_REF V_0" INIT_IL="ldloca.s 1\n\tstloc.0" fi TEST_FILE=`echo ${TEST_VALIDITY}_${TEST_NAME} | $SED -e 's/ /_/g' -e 's/\./_/g' -e 's/&/mp/g' -e 's/\[/_/g' -e 's/\]/_/g'`_generated.il echo $TEST_FILE $SED -e "s/TYPE1/${TEST_TYPE1}/g" -e "s/VALIDITY/${TEST_VALIDITY}/g" -e "s/TYPE2/${TEST_TYPE2}/g" -e "s/INIT_LOCS/${INIT_LOCS}/g" -e "s/INIT_IL/${INIT_IL}/g"> $TEST_FILE <<//EOF .assembly extern mscorlib { .ver 2:0:0:0 .publickeytoken = (B7 7A 5C 56 19 34 E0 89 ) // .z\V.4.. } .assembly 'ldobj_test' { .hash algorithm 0x00008004 .ver 0:0:0:0 } .module ldobj.exe .class Class extends [mscorlib]System.Object { .field public int32 valid } .class public Template\`1<T> extends [mscorlib]System.Object { } .class sealed public StructTemplate\`1<T> extends [mscorlib]System.ValueType { .field public !0 t } .class sealed public StructTemplate2\`1<T> extends [mscorlib]System.ValueType { .field public !0 t } .class public auto ansi sealed MyStruct extends [mscorlib]System.ValueType { .field public int32 foo } .class public auto ansi sealed MyStruct2 extends [mscorlib]System.ValueType { .field public int32 foo } .method public static int32 Main () { .entrypoint .maxstack 8 .locals init (TYPE1 V_0 INIT_LOCS) INIT_IL ldloc.0 ldobj TYPE2 // VALIDITY pop ldc.i4.0 ret } //EOF
#! /bin/sh SED="sed" if [ `which gsed 2> /dev/null` ]; then SED="gsed" fi TEST_NAME=$1 TEST_VALIDITY=$2 TEST_TYPE1=$3 TEST_TYPE2=$4 ZZ=`echo $TEST_TYPE1 | grep "\&"` T1_REF=$? LOCAL_INIT=""; if [ $T1_REF -eq 0 ]; then T1_NO_REF=`echo $TEST_TYPE1 | cut -d '\' -f 1` INIT_LOCS=", $T1_NO_REF V_0" INIT_IL="ldloca.s 1\n\tstloc.0" fi TEST_FILE=`echo ${TEST_VALIDITY}_${TEST_NAME} | $SED -e 's/ /_/g' -e 's/\./_/g' -e 's/&/mp/g' -e 's/\[/_/g' -e 's/\]/_/g'`_generated.il echo $TEST_FILE $SED -e "s/TYPE1/${TEST_TYPE1}/g" -e "s/VALIDITY/${TEST_VALIDITY}/g" -e "s/TYPE2/${TEST_TYPE2}/g" -e "s/INIT_LOCS/${INIT_LOCS}/g" -e "s/INIT_IL/${INIT_IL}/g"> $TEST_FILE <<//EOF .assembly extern mscorlib { .ver 2:0:0:0 .publickeytoken = (B7 7A 5C 56 19 34 E0 89 ) // .z\V.4.. } .assembly 'ldobj_test' { .hash algorithm 0x00008004 .ver 0:0:0:0 } .module ldobj.exe .class Class extends [mscorlib]System.Object { .field public int32 valid } .class public Template\`1<T> extends [mscorlib]System.Object { } .class sealed public StructTemplate\`1<T> extends [mscorlib]System.ValueType { .field public !0 t } .class sealed public StructTemplate2\`1<T> extends [mscorlib]System.ValueType { .field public !0 t } .class public auto ansi sealed MyStruct extends [mscorlib]System.ValueType { .field public int32 foo } .class public auto ansi sealed MyStruct2 extends [mscorlib]System.ValueType { .field public int32 foo } .method public static int32 Main () { .entrypoint .maxstack 8 .locals init (TYPE1 V_0 INIT_LOCS) INIT_IL ldloc.0 ldobj TYPE2 // VALIDITY pop ldc.i4.0 ret } //EOF
-1
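`make_ldobj_test.sh` above generates IL test files by sed-substituting placeholders into a here-document. A minimal sketch of that generator pattern follows; the `TEST_TYPE1`/`TEST_TYPE2` values and the emitted IL fragment are arbitrary examples chosen for illustration, not cases from the verifier suite.

```bash
#!/bin/sh
# Hedged miniature of the generator pattern above: substitute placeholders in
# a here-document with sed and write the result to a per-case .il file.
TEST_NAME=simple
TEST_VALIDITY=valid
TEST_TYPE1=int32
TEST_TYPE2=int32

TEST_FILE=$(echo "${TEST_VALIDITY}_${TEST_NAME}" | sed -e 's/ /_/g' -e 's/\./_/g')_generated.il
sed -e "s/TYPE1/${TEST_TYPE1}/g" -e "s/TYPE2/${TEST_TYPE2}/g" > "$TEST_FILE" <<//EOF
// Generated: a TYPE1 local whose value is read back as TYPE2.
.locals init (TYPE1 V_0)
ldloc.0
ldobj TYPE2
//EOF
echo "wrote $TEST_FILE"
```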
dotnet/runtime
66,477
Add armv8l virtualized 32-bit ARM core detection
## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse `armv8l` (the output of `uname -m` on such cores) as `arm`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812). ARM support on the source-build side: https://github.com/dotnet/installer/pull/13378. Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782
ayakael
2022-03-10T21:05:11Z
2022-03-11T09:29:12Z
f82fe1d83f1a0e22025e186fdc6f4d86de0a83a1
12816261feee53cc57fab17f088ee6544ef6ffdb
Add armv8l virtualized 32-bit ARM core detection. ## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse `armv8l` (the output of `uname -m` on such cores) as `arm`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812). ARM support on the source-build side: https://github.com/dotnet/installer/pull/13378. Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782
./src/libraries/Common/tests/System/Net/EnterpriseTests/setup/linuxclient/run.sh
#!/usr/bin/env bash cp /SHARED/linuxclient.keytab /etc/krb5.keytab # Keep the container running tail -f /dev/null
#!/usr/bin/env bash cp /SHARED/linuxclient.keytab /etc/krb5.keytab # Keep the container running tail -f /dev/null
-1
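The Linux client entrypoint above installs a keytab and then blocks on `tail -f /dev/null` to keep the container alive. Below is a hedged variant of the same pattern that additionally exits cleanly on SIGTERM/SIGINT; the trap is an addition for illustration, not part of the original script, and the keytab path is assumed to exist as in the script above.

```bash
#!/usr/bin/env bash
# Hedged variant of the entrypoint above: install the keytab, then block
# until the container is stopped, exiting cleanly on SIGTERM/SIGINT.
cp /SHARED/linuxclient.keytab /etc/krb5.keytab

trap 'exit 0' SIGTERM SIGINT
# Keep the container running without consuming CPU.
tail -f /dev/null &
wait $!
```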
dotnet/runtime
66,477
Add armv8l virtualized 32-bit ARM core detection
## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse `armv8l` (the output of `uname -m` on such cores) as `arm`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812). ARM support on the source-build side: https://github.com/dotnet/installer/pull/13378. Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782
ayakael
2022-03-10T21:05:11Z
2022-03-11T09:29:12Z
f82fe1d83f1a0e22025e186fdc6f4d86de0a83a1
12816261feee53cc57fab17f088ee6544ef6ffdb
Add armv8l virtualized 32-bit ARM core detection. ## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse `armv8l` (the output of `uname -m` on such cores) as `arm`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812). ARM support on the source-build side: https://github.com/dotnet/installer/pull/13378. Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782
./src/tests/BuildWasmApps/Wasm.Build.Tests/data/RunScriptTemplate.sh
#!/usr/bin/env bash # SetCommands defined in eng\testing\tests.wasm.targets [[SetCommands]] [[SetCommandsEcho]] EXECUTION_DIR=$(dirname $0) if [[ -n "$3" ]]; then SCENARIO=$3 fi if [[ -z "$HELIX_WORKITEM_UPLOAD_ROOT" ]]; then XHARNESS_OUT="$EXECUTION_DIR/xharness-output" else XHARNESS_OUT="$HELIX_WORKITEM_UPLOAD_ROOT/xharness-output" fi if [[ -n "$XHARNESS_CLI_PATH" ]]; then # When running in CI, we only have the .NET runtime available # We need to call the XHarness CLI DLL directly via dotnet exec HARNESS_RUNNER="dotnet exec $XHARNESS_CLI_PATH" else HARNESS_RUNNER="dotnet xharness" fi if [[ -z "$XHARNESS_COMMAND" ]]; then if [[ "$SCENARIO" == "WasmTestOnBrowser" || "$SCENARIO" == "wasmtestonbrowser" ]]; then XHARNESS_COMMAND="test-browser" else XHARNESS_COMMAND="test" fi fi if [[ "$XHARNESS_COMMAND" == "test" ]]; then if [[ -z "$JS_ENGINE" ]]; then if [[ "$SCENARIO" == "WasmTestOnNodeJs" || "$SCENARIO" == "wasmtestonnodejs" ]]; then JS_ENGINE="--engine=NodeJS" else JS_ENGINE="--engine=V8" fi fi if [[ -z "$MAIN_JS" ]]; then MAIN_JS="--js-file=test-main.js" fi if [[ -z "$JS_ENGINE_ARGS" ]]; then JS_ENGINE_ARGS="--engine-arg=--stack-trace-limit=1000" fi fi if [[ -z "$XHARNESS_ARGS" ]]; then XHARNESS_ARGS="$JS_ENGINE $JS_ENGINE_ARGS $MAIN_JS" fi echo EXECUTION_DIR=$EXECUTION_DIR echo SCENARIO=$SCENARIO echo XHARNESS_OUT=$XHARNESS_OUT echo XHARNESS_CLI_PATH=$XHARNESS_CLI_PATH echo HARNESS_RUNNER=$HARNESS_RUNNER echo XHARNESS_COMMAND=$XHARNESS_COMMAND echo MAIN_JS=$MAIN_JS echo JS_ENGINE=$JS_ENGINE echo JS_ENGINE_ARGS=$JS_ENGINE_ARGS echo XHARNESS_ARGS=$XHARNESS_ARGS function set_env_vars() { if [ "x$TEST_USING_WORKLOADS" = "xtrue" ]; then export PATH=$BASE_DIR/dotnet-workload:$PATH export SDK_HAS_WORKLOAD_INSTALLED=true export SDK_FOR_WORKLOAD_TESTING_PATH=$BASE_DIR/dotnet-workload export AppRefDir=$BASE_DIR/microsoft.netcore.app.ref elif [[ -n "$HELIX_WORKITEM_UPLOAD_ROOT" ]]; then export WasmBuildSupportDir=$BASE_DIR/build else export PATH=$BASE_DIR/sdk-no-workload:$PATH export SDK_HAS_WORKLOAD_INSTALLED=false export SDK_FOR_WORKLOAD_TESTING_PATH=$BASE_DIR/sdk-no-workload fi } export TEST_LOG_PATH=${XHARNESS_OUT}/logs pushd $EXECUTION_DIR # ========================= BEGIN Test Execution ============================= echo ----- start $(date) =============== To repro directly: ===================================================== echo pushd $EXECUTION_DIR # RunCommands defined in eng\testing\tests.wasm.targets [[RunCommandsEcho]] echo popd echo =========================================================================================================== pushd $EXECUTION_DIR # RunCommands defined in eng\testing\tests.wasm.targets [[RunCommands]] _exitCode=$? popd echo ----- end $(date) ----- exit code $_exitCode ---------------------------------------------------------- echo "XHarness artifacts: $XHARNESS_OUT" exit $_exitCode
#!/usr/bin/env bash # SetCommands defined in eng\testing\tests.wasm.targets [[SetCommands]] [[SetCommandsEcho]] EXECUTION_DIR=$(dirname $0) if [[ -n "$3" ]]; then SCENARIO=$3 fi if [[ -z "$HELIX_WORKITEM_UPLOAD_ROOT" ]]; then XHARNESS_OUT="$EXECUTION_DIR/xharness-output" else XHARNESS_OUT="$HELIX_WORKITEM_UPLOAD_ROOT/xharness-output" fi if [[ -n "$XHARNESS_CLI_PATH" ]]; then # When running in CI, we only have the .NET runtime available # We need to call the XHarness CLI DLL directly via dotnet exec HARNESS_RUNNER="dotnet exec $XHARNESS_CLI_PATH" else HARNESS_RUNNER="dotnet xharness" fi if [[ -z "$XHARNESS_COMMAND" ]]; then if [[ "$SCENARIO" == "WasmTestOnBrowser" || "$SCENARIO" == "wasmtestonbrowser" ]]; then XHARNESS_COMMAND="test-browser" else XHARNESS_COMMAND="test" fi fi if [[ "$XHARNESS_COMMAND" == "test" ]]; then if [[ -z "$JS_ENGINE" ]]; then if [[ "$SCENARIO" == "WasmTestOnNodeJs" || "$SCENARIO" == "wasmtestonnodejs" ]]; then JS_ENGINE="--engine=NodeJS" else JS_ENGINE="--engine=V8" fi fi if [[ -z "$MAIN_JS" ]]; then MAIN_JS="--js-file=test-main.js" fi if [[ -z "$JS_ENGINE_ARGS" ]]; then JS_ENGINE_ARGS="--engine-arg=--stack-trace-limit=1000" fi fi if [[ -z "$XHARNESS_ARGS" ]]; then XHARNESS_ARGS="$JS_ENGINE $JS_ENGINE_ARGS $MAIN_JS" fi echo EXECUTION_DIR=$EXECUTION_DIR echo SCENARIO=$SCENARIO echo XHARNESS_OUT=$XHARNESS_OUT echo XHARNESS_CLI_PATH=$XHARNESS_CLI_PATH echo HARNESS_RUNNER=$HARNESS_RUNNER echo XHARNESS_COMMAND=$XHARNESS_COMMAND echo MAIN_JS=$MAIN_JS echo JS_ENGINE=$JS_ENGINE echo JS_ENGINE_ARGS=$JS_ENGINE_ARGS echo XHARNESS_ARGS=$XHARNESS_ARGS function set_env_vars() { if [ "x$TEST_USING_WORKLOADS" = "xtrue" ]; then export PATH=$BASE_DIR/dotnet-workload:$PATH export SDK_HAS_WORKLOAD_INSTALLED=true export SDK_FOR_WORKLOAD_TESTING_PATH=$BASE_DIR/dotnet-workload export AppRefDir=$BASE_DIR/microsoft.netcore.app.ref elif [[ -n "$HELIX_WORKITEM_UPLOAD_ROOT" ]]; then export WasmBuildSupportDir=$BASE_DIR/build else export PATH=$BASE_DIR/sdk-no-workload:$PATH export SDK_HAS_WORKLOAD_INSTALLED=false export SDK_FOR_WORKLOAD_TESTING_PATH=$BASE_DIR/sdk-no-workload fi } export TEST_LOG_PATH=${XHARNESS_OUT}/logs pushd $EXECUTION_DIR # ========================= BEGIN Test Execution ============================= echo ----- start $(date) =============== To repro directly: ===================================================== echo pushd $EXECUTION_DIR # RunCommands defined in eng\testing\tests.wasm.targets [[RunCommandsEcho]] echo popd echo =========================================================================================================== pushd $EXECUTION_DIR # RunCommands defined in eng\testing\tests.wasm.targets [[RunCommands]] _exitCode=$? popd echo ----- end $(date) ----- exit code $_exitCode ---------------------------------------------------------- echo "XHarness artifacts: $XHARNESS_OUT" exit $_exitCode
-1
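`RunScriptTemplate.sh` above fills in `JS_ENGINE`, `MAIN_JS`, `JS_ENGINE_ARGS`, and `XHARNESS_ARGS` with explicit `-z` checks. The sketch below shows the same defaulting expressed with `${VAR:-default}`; it deliberately drops the scenario-dependent NodeJS/V8 branch, so treat it as a simplified illustration rather than a replacement for the template.

```bash
#!/usr/bin/env bash
# Hedged sketch of the defaulting the template above does with explicit -z
# checks; ${VAR:-default} expresses the same idea more compactly.
SCENARIO=${3:-}
JS_ENGINE=${JS_ENGINE:-"--engine=V8"}
MAIN_JS=${MAIN_JS:-"--js-file=test-main.js"}
JS_ENGINE_ARGS=${JS_ENGINE_ARGS:-"--engine-arg=--stack-trace-limit=1000"}
XHARNESS_ARGS=${XHARNESS_ARGS:-"$JS_ENGINE $JS_ENGINE_ARGS $MAIN_JS"}

echo "SCENARIO=$SCENARIO"
echo "XHARNESS_ARGS=$XHARNESS_ARGS"
```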
dotnet/runtime
66,477
Add armv8l virtualized 32-bit ARM core detection
## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse `armv8l` (the output of `uname -m` on such cores) as `arm`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812). ARM support on the source-build side: https://github.com/dotnet/installer/pull/13378. Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782
ayakael
2022-03-10T21:05:11Z
2022-03-11T09:29:12Z
f82fe1d83f1a0e22025e186fdc6f4d86de0a83a1
12816261feee53cc57fab17f088ee6544ef6ffdb
Add armv8l virtualized 32-bit ARM core detection. ## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse `armv8l` (the output of `uname -m` on such cores) as `arm`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812). ARM support on the source-build side: https://github.com/dotnet/installer/pull/13378. Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782
./src/mono/mono/tests/verifier/make_throw_test.sh
#! /bin/sh SED="sed" if [ `which gsed 2> /dev/null` ]; then SED="gsed" fi TEST_NAME=$1 TEST_VALIDITY=$2 TEST_LOCAL=$3 TEST_OP=$4 TEST_NAME=${TEST_VALIDITY}_${TEST_NAME} TEST_FILE=${TEST_NAME}_generated.il echo $TEST_FILE $SED -e "s/VALIDITY/${TEST_VALIDITY}/g" -e "s/LOCAL/${TEST_LOCAL}/g" -e "s/OP/${TEST_OP}/g" > $TEST_FILE <<//EOF // VALIDITY CIL which breaks the ECMA-335 rules. // this CIL should fail verification by a conforming CLI verifier. .assembly '${TEST_NAME}_generated' { .hash algorithm 0x00008004 .ver 0:0:0:0 } .class ClassA extends [mscorlib]System.Object { .method public hidebysig specialname rtspecialname instance default void .ctor () cil managed { .maxstack 8 ldarg.0 call instance void object::.ctor() ret } } .class ClassSubA extends ClassA { .method public hidebysig specialname rtspecialname instance default void .ctor () cil managed { .maxstack 8 ldarg.0 call instance void ClassA::.ctor() ret } } .class public auto ansi sealed MyStruct extends [mscorlib]System.ValueType { .field public int32 foo } .method public static void foo() cil managed { .maxstack 8 .locals init (LOCAL l_0 ) ldloc.0 OP throw // VALIDITY. ret } .method public static int32 Main() cil managed { .maxstack 8 .entrypoint .try { call void foo () leave END } catch [mscorlib]System.NullReferenceException { pop leave END } END: ldc.i4.0 ret } //EOF
#! /bin/sh SED="sed" if [ `which gsed 2> /dev/null` ]; then SED="gsed" fi TEST_NAME=$1 TEST_VALIDITY=$2 TEST_LOCAL=$3 TEST_OP=$4 TEST_NAME=${TEST_VALIDITY}_${TEST_NAME} TEST_FILE=${TEST_NAME}_generated.il echo $TEST_FILE $SED -e "s/VALIDITY/${TEST_VALIDITY}/g" -e "s/LOCAL/${TEST_LOCAL}/g" -e "s/OP/${TEST_OP}/g" > $TEST_FILE <<//EOF // VALIDITY CIL which breaks the ECMA-335 rules. // this CIL should fail verification by a conforming CLI verifier. .assembly '${TEST_NAME}_generated' { .hash algorithm 0x00008004 .ver 0:0:0:0 } .class ClassA extends [mscorlib]System.Object { .method public hidebysig specialname rtspecialname instance default void .ctor () cil managed { .maxstack 8 ldarg.0 call instance void object::.ctor() ret } } .class ClassSubA extends ClassA { .method public hidebysig specialname rtspecialname instance default void .ctor () cil managed { .maxstack 8 ldarg.0 call instance void ClassA::.ctor() ret } } .class public auto ansi sealed MyStruct extends [mscorlib]System.ValueType { .field public int32 foo } .method public static void foo() cil managed { .maxstack 8 .locals init (LOCAL l_0 ) ldloc.0 OP throw // VALIDITY. ret } .method public static int32 Main() cil managed { .maxstack 8 .entrypoint .try { call void foo () leave END } catch [mscorlib]System.NullReferenceException { pop leave END } END: ldc.i4.0 ret } //EOF
-1
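Like the other generators, `make_throw_test.sh` above first probes for GNU sed with `which gsed`. A small sketch of that probe follows, written with `command -v`, the POSIX way to test for a command; this is a stylistic variant shown for illustration, not the script's own code.

```bash
#!/bin/sh
# Hedged sketch of the "prefer GNU sed when available" probe the generator
# scripts above perform with `which`; command -v is the portable equivalent.
SED=sed
if command -v gsed >/dev/null 2>&1; then
    SED=gsed
fi
echo "using: $SED"
```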
dotnet/runtime
66,477
Add armv8l virtualized 32-bit ARM core detection
## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse `armv8l` (the output of `uname -m` on such cores) as `arm`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812). ARM support on the source-build side: https://github.com/dotnet/installer/pull/13378. Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782
ayakael
2022-03-10T21:05:11Z
2022-03-11T09:29:12Z
f82fe1d83f1a0e22025e186fdc6f4d86de0a83a1
12816261feee53cc57fab17f088ee6544ef6ffdb
Add armv8l virtualized 32-bit ARM core detection. ## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse `armv8l` (the output of `uname -m` on such cores) as `arm`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812). ARM support on the source-build side: https://github.com/dotnet/installer/pull/13378. Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782
./src/mono/mono/tests/verifier/make_il_overflow_test.sh
#! /bin/sh SED="sed" if [ `which gsed 2> /dev/null` ]; then SED="gsed" fi TEST_NAME=$1 TEST_VALIDITY=$2 TEST_BYTE_0=$3 TEST_BYTE_1=$4 TEST_BYTE_2=$5 TEST_BYTE_3=$6 TEST_BYTE_4=$7 if [ -n "$TEST_BYTE_1" ]; then EMIT_BYTE_1=".emitbyte $TEST_BYTE_1"; fi if [ -n "$TEST_BYTE_2" ]; then EMIT_BYTE_2=".emitbyte $TEST_BYTE_2"; fi if [ -n "$TEST_BYTE_3" ]; then EMIT_BYTE_3=".emitbyte $TEST_BYTE_3"; fi if [ -n "$TEST_BYTE_4" ]; then EMIT_BYTE_4=".emitbyte $TEST_BYTE_4"; fi if [ -n "$TEST_BYTE_5" ]; then EMIT_BYTE_5=".emitbyte $TEST_BYTE_5"; fi TEST_FILE=`echo ${TEST_VALIDITY}_${TEST_NAME} | $SED -e 's/ /_/g' -e 's/\./_/g' -e 's/&/mp/g' -e 's/\[/_/g' -e 's/\]/_/g'`_generated.il echo $TEST_FILE $SED -e "s/VALIDITY/${TEST_VALIDITY}/g" -e "s/BYTE_0/${TEST_BYTE_0}/g" -e "s/BYTE_1/${TEST_BYTE_1}/g" > $TEST_FILE <<//EOF // VALIDITY CIL which breaks the ECMA-335 rules. // this CIL should fail verification by a conforming CLI verifier. .assembly '${TEST_NAME}_generated' { .hash algorithm 0x00008004 .ver 0:0:0:0 } .method public static void Main() cil managed { .entrypoint .maxstack 2 .locals init () nop nop .emitbyte BYTE_0 ${EMIT_BYTE_1} ${EMIT_BYTE_2} ${EMIT_BYTE_3} ${EMIT_BYTE_4} ${EMIT_BYTE_5} } //EOF
#! /bin/sh SED="sed" if [ `which gsed 2> /dev/null` ]; then SED="gsed" fi TEST_NAME=$1 TEST_VALIDITY=$2 TEST_BYTE_0=$3 TEST_BYTE_1=$4 TEST_BYTE_2=$5 TEST_BYTE_3=$6 TEST_BYTE_4=$7 if [ -n "$TEST_BYTE_1" ]; then EMIT_BYTE_1=".emitbyte $TEST_BYTE_1"; fi if [ -n "$TEST_BYTE_2" ]; then EMIT_BYTE_2=".emitbyte $TEST_BYTE_2"; fi if [ -n "$TEST_BYTE_3" ]; then EMIT_BYTE_3=".emitbyte $TEST_BYTE_3"; fi if [ -n "$TEST_BYTE_4" ]; then EMIT_BYTE_4=".emitbyte $TEST_BYTE_4"; fi if [ -n "$TEST_BYTE_5" ]; then EMIT_BYTE_5=".emitbyte $TEST_BYTE_5"; fi TEST_FILE=`echo ${TEST_VALIDITY}_${TEST_NAME} | $SED -e 's/ /_/g' -e 's/\./_/g' -e 's/&/mp/g' -e 's/\[/_/g' -e 's/\]/_/g'`_generated.il echo $TEST_FILE $SED -e "s/VALIDITY/${TEST_VALIDITY}/g" -e "s/BYTE_0/${TEST_BYTE_0}/g" -e "s/BYTE_1/${TEST_BYTE_1}/g" > $TEST_FILE <<//EOF // VALIDITY CIL which breaks the ECMA-335 rules. // this CIL should fail verification by a conforming CLI verifier. .assembly '${TEST_NAME}_generated' { .hash algorithm 0x00008004 .ver 0:0:0:0 } .method public static void Main() cil managed { .entrypoint .maxstack 2 .locals init () nop nop .emitbyte BYTE_0 ${EMIT_BYTE_1} ${EMIT_BYTE_2} ${EMIT_BYTE_3} ${EMIT_BYTE_4} ${EMIT_BYTE_5} } //EOF
-1
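`make_il_overflow_test.sh` above turns each optional byte argument into an `.emitbyte` directive only when that argument is supplied. The sketch below shows the same optional-argument handling in miniature with a loop; the loop form is an illustration, while the original spells the checks out one by one.

```bash
#!/bin/sh
# Hedged sketch of the optional-argument handling above: every byte argument
# that is actually supplied becomes an .emitbyte directive in the output.
EMIT=""
for b in "$1" "$2" "$3" "$4" "$5"; do
    if [ -n "$b" ]; then
        EMIT="${EMIT}.emitbyte $b
"
    fi
done
printf '%s' "$EMIT"
# Example: `sh sketch.sh 0x16 0x2a` prints two .emitbyte lines.
```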
dotnet/runtime
66,477
Add armv8l virtualized 32-bit ARM core detection
## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse `armv8l` (the output of `uname -m` on such cores) as `arm`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812). ARM support on the source-build side: https://github.com/dotnet/installer/pull/13378. Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782
ayakael
2022-03-10T21:05:11Z
2022-03-11T09:29:12Z
f82fe1d83f1a0e22025e186fdc6f4d86de0a83a1
12816261feee53cc57fab17f088ee6544ef6ffdb
Add armv8l virtualized 32-bit ARM core detection. ## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse `armv8l` (the output of `uname -m` on such cores) as `arm`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812). ARM support on the source-build side: https://github.com/dotnet/installer/pull/13378. Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782
./src/libraries/System.Net.Http/tests/StressTests/HttpStress/build-local.sh
#!/usr/bin/env bash ## This is a helper script for non-containerized local build and test execution. ## It downloads and uses the daily SDK which contains the compatible AspNetCore bits. ## Usage: ## ./build-local.sh [StressConfiguration] [LibrariesConfiguration] version=7.0 repo_root=$(git rev-parse --show-toplevel) daily_dotnet_root=./.dotnet-daily stress_configuration="Release" if [ "$1" != "" ]; then stress_configuration=${1,,} # Lowercase all characters in $1 stress_configuration=${stress_configuration^} # Uppercase first character fi libraries_configuration="Release" if [ "$2" != "" ]; then libraries_configuration=${2,,} # Lowercase all characters in $1 libraries_configuration=${libraries_configuration^} # Uppercase first character fi testhost_root=$repo_root/artifacts/bin/testhost/net$version-Linux-$libraries_configuration-x64 echo "StressConfiguration: $stress_configuration, LibrariesConfiguration: $libraries_configuration, testhost: $testhost_root" if [[ ! -d $testhost_root ]]; then echo "Cannot find testhost in: $testhost_root" echo "Make sure libraries with the requested configuration are built!" echo "Usage:" echo "./build-local.sh [StressConfiguration] [LibrariesConfiguration]" echo "StressConfiguration and LibrariesConfiguration default to Release!" exit 1 fi if [[ ! -d $daily_dotnet_root ]]; then echo "Downloading daily SDK to $daily_dotnet_root" mkdir $daily_dotnet_root wget https://dot.net/v1/dotnet-install.sh -O $daily_dotnet_root/dotnet-install.sh bash $daily_dotnet_root/dotnet-install.sh --no-path --channel $version.1xx --quality daily --install-dir $daily_dotnet_root else echo "Daily SDK found in $daily_dotnet_root" fi export DOTNET_ROOT=$daily_dotnet_root export PATH=$DOTNET_ROOT:$PATH export DOTNET_MULTILEVEL_LOOKUP=0 if [[ ! -d "$testhost_root/shared/Microsoft.AspNetCore.App" ]]; then echo "Copying Microsoft.AspNetCore.App bits from daily SDK to testhost: $testhost_root" cp -r $daily_dotnet_root/shared/Microsoft.AspNetCore.App $testhost_root/shared/Microsoft.AspNetCore.App else echo "Microsoft.AspNetCore.App found in testhost: $testhost_root" fi echo "Building solution." dotnet build -c $stress_configuration runscript=./run-stress-${stress_configuration,,}-${libraries_configuration,,}.sh if [[ ! -f $runscript ]]; then echo "Generating runscript." echo "$testhost_root/dotnet exec ./bin/$stress_configuration/net$version/HttpStress.dll \$@" > $runscript chmod +x $runscript fi echo "To run tests type:" echo "$runscript [stress test args]"
#!/usr/bin/env bash ## This is a helper script for non-containerized local build and test execution. ## It downloads and uses the daily SDK which contains the compatible AspNetCore bits. ## Usage: ## ./build-local.sh [StressConfiguration] [LibrariesConfiguration] version=7.0 repo_root=$(git rev-parse --show-toplevel) daily_dotnet_root=./.dotnet-daily stress_configuration="Release" if [ "$1" != "" ]; then stress_configuration=${1,,} # Lowercase all characters in $1 stress_configuration=${stress_configuration^} # Uppercase first character fi libraries_configuration="Release" if [ "$2" != "" ]; then libraries_configuration=${2,,} # Lowercase all characters in $1 libraries_configuration=${libraries_configuration^} # Uppercase first character fi testhost_root=$repo_root/artifacts/bin/testhost/net$version-Linux-$libraries_configuration-x64 echo "StressConfiguration: $stress_configuration, LibrariesConfiguration: $libraries_configuration, testhost: $testhost_root" if [[ ! -d $testhost_root ]]; then echo "Cannot find testhost in: $testhost_root" echo "Make sure libraries with the requested configuration are built!" echo "Usage:" echo "./build-local.sh [StressConfiguration] [LibrariesConfiguration]" echo "StressConfiguration and LibrariesConfiguration default to Release!" exit 1 fi if [[ ! -d $daily_dotnet_root ]]; then echo "Downloading daily SDK to $daily_dotnet_root" mkdir $daily_dotnet_root wget https://dot.net/v1/dotnet-install.sh -O $daily_dotnet_root/dotnet-install.sh bash $daily_dotnet_root/dotnet-install.sh --no-path --channel $version.1xx --quality daily --install-dir $daily_dotnet_root else echo "Daily SDK found in $daily_dotnet_root" fi export DOTNET_ROOT=$daily_dotnet_root export PATH=$DOTNET_ROOT:$PATH export DOTNET_MULTILEVEL_LOOKUP=0 if [[ ! -d "$testhost_root/shared/Microsoft.AspNetCore.App" ]]; then echo "Copying Microsoft.AspNetCore.App bits from daily SDK to testhost: $testhost_root" cp -r $daily_dotnet_root/shared/Microsoft.AspNetCore.App $testhost_root/shared/Microsoft.AspNetCore.App else echo "Microsoft.AspNetCore.App found in testhost: $testhost_root" fi echo "Building solution." dotnet build -c $stress_configuration runscript=./run-stress-${stress_configuration,,}-${libraries_configuration,,}.sh if [[ ! -f $runscript ]]; then echo "Generating runscript." echo "$testhost_root/dotnet exec ./bin/$stress_configuration/net$version/HttpStress.dll \$@" > $runscript chmod +x $runscript fi echo "To run tests type:" echo "$runscript [stress test args]"
-1
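`build-local.sh` above normalizes configuration names with the bash expansions `${var,,}` (lowercase) and `${var^}` (uppercase first character). The sketch below isolates that normalization into a small helper; the function name is invented for illustration, and the expansions require bash 4 or newer.

```bash
#!/usr/bin/env bash
# Hedged sketch of the configuration-name normalization above: lowercase the
# whole value, then uppercase the first character, so "release", "RELEASE"
# and "Release" all normalize to "Release".
normalize_configuration() {
    local value=${1:-Release}
    value=${value,,}   # lowercase every character (bash 4+)
    value=${value^}    # uppercase the first character
    echo "$value"
}

normalize_configuration "debug"    # -> Debug
normalize_configuration "CHECKED"  # -> Checked
```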
dotnet/runtime
66,477
Add armv8l virtualized 32-bit ARM core detection
## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse `armv8l` (the output of `uname -m` on such cores) as `arm`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812). ARM support on the source-build side: https://github.com/dotnet/installer/pull/13378. Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782
ayakael
2022-03-10T21:05:11Z
2022-03-11T09:29:12Z
f82fe1d83f1a0e22025e186fdc6f4d86de0a83a1
12816261feee53cc57fab17f088ee6544ef6ffdb
Add armv8l virtualized 32-bit ARM core detection. ## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse `armv8l` (the output of `uname -m` on such cores) as `arm`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812). ARM support on the source-build side: https://github.com/dotnet/installer/pull/13378. Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782
./eng/common/SetupNugetSources.sh
#!/usr/bin/env bash # This file is a temporary workaround for internal builds to be able to restore from private AzDO feeds. # This file should be removed as part of this issue: https://github.com/dotnet/arcade/issues/4080 # # What the script does is iterate over all package sources in the pointed NuGet.config and add a credential entry # under <packageSourceCredentials> for each Maestro's managed private feed. Two additional credential # entries are also added for the two private static internal feeds: dotnet3-internal and dotnet3-internal-transport. # # This script needs to be called in every job that will restore packages and which the base repo has # private AzDO feeds in the NuGet.config. # # See example YAML call for this script below. Note the use of the variable `$(dn-bot-dnceng-artifact-feeds-rw)` # from the AzureDevOps-Artifact-Feeds-Pats variable group. # # Any disabledPackageSources entries which start with "darc-int" will be re-enabled as part of this script executing. # # - task: Bash@3 # displayName: Setup Private Feeds Credentials # inputs: # filePath: $(Build.SourcesDirectory)/eng/common/SetupNugetSources.sh # arguments: $(Build.SourcesDirectory)/NuGet.config $Token # condition: ne(variables['Agent.OS'], 'Windows_NT') # env: # Token: $(dn-bot-dnceng-artifact-feeds-rw) ConfigFile=$1 CredToken=$2 NL='\n' TB=' ' source="${BASH_SOURCE[0]}" # resolve $source until the file is no longer a symlink while [[ -h "$source" ]]; do scriptroot="$( cd -P "$( dirname "$source" )" && pwd )" source="$(readlink "$source")" # if $source was a relative symlink, we need to resolve it relative to the path where the # symlink file was located [[ $source != /* ]] && source="$scriptroot/$source" done scriptroot="$( cd -P "$( dirname "$source" )" && pwd )" . "$scriptroot/tools.sh" if [ ! -f "$ConfigFile" ]; then Write-PipelineTelemetryError -Category 'Build' "Error: Eng/common/SetupNugetSources.sh returned a non-zero exit code. Couldn't find the NuGet config file: $ConfigFile" ExitWithExitCode 1 fi if [ -z "$CredToken" ]; then Write-PipelineTelemetryError -category 'Build' "Error: Eng/common/SetupNugetSources.sh returned a non-zero exit code. Please supply a valid PAT" ExitWithExitCode 1 fi if [[ `uname -s` == "Darwin" ]]; then NL=$'\\\n' TB='' fi # Ensure there is a <packageSources>...</packageSources> section. grep -i "<packageSources>" $ConfigFile if [ "$?" != "0" ]; then echo "Adding <packageSources>...</packageSources> section." ConfigNodeHeader="<configuration>" PackageSourcesTemplate="${TB}<packageSources>${NL}${TB}</packageSources>" sed -i.bak "s|$ConfigNodeHeader|$ConfigNodeHeader${NL}$PackageSourcesTemplate|" $ConfigFile fi # Ensure there is a <packageSourceCredentials>...</packageSourceCredentials> section. grep -i "<packageSourceCredentials>" $ConfigFile if [ "$?" != "0" ]; then echo "Adding <packageSourceCredentials>...</packageSourceCredentials> section." PackageSourcesNodeFooter="</packageSources>" PackageSourceCredentialsTemplate="${TB}<packageSourceCredentials>${NL}${TB}</packageSourceCredentials>" sed -i.bak "s|$PackageSourcesNodeFooter|$PackageSourcesNodeFooter${NL}$PackageSourceCredentialsTemplate|" $ConfigFile fi PackageSources=() # Ensure dotnet3.1-internal and dotnet3.1-internal-transport are in the packageSources if the public dotnet3.1 feeds are present grep -i "<add key=\"dotnet3.1\"" $ConfigFile if [ "$?" == "0" ]; then grep -i "<add key=\"dotnet3.1-internal\"" $ConfigFile if [ "$?" != "0" ]; then echo "Adding dotnet3.1-internal to the packageSources." 
PackageSourcesNodeFooter="</packageSources>" PackageSourceTemplate="${TB}<add key=\"dotnet3.1-internal\" value=\"https://pkgs.dev.azure.com/dnceng/_packaging/dotnet3.1-internal/nuget/v2\" />" sed -i.bak "s|$PackageSourcesNodeFooter|$PackageSourceTemplate${NL}$PackageSourcesNodeFooter|" $ConfigFile fi PackageSources+=('dotnet3.1-internal') grep -i "<add key=\"dotnet3.1-internal-transport\">" $ConfigFile if [ "$?" != "0" ]; then echo "Adding dotnet3.1-internal-transport to the packageSources." PackageSourcesNodeFooter="</packageSources>" PackageSourceTemplate="${TB}<add key=\"dotnet3.1-internal-transport\" value=\"https://pkgs.dev.azure.com/dnceng/_packaging/dotnet3.1-internal-transport/nuget/v2\" />" sed -i.bak "s|$PackageSourcesNodeFooter|$PackageSourceTemplate${NL}$PackageSourcesNodeFooter|" $ConfigFile fi PackageSources+=('dotnet3.1-internal-transport') fi # Ensure dotnet5-internal and dotnet5-internal-transport are in the packageSources if the public dotnet5 feeds are present grep -i "<add key=\"dotnet5\"" $ConfigFile if [ "$?" == "0" ]; then grep -i "<add key=\"dotnet5-internal\"" $ConfigFile if [ "$?" != "0" ]; then echo "Adding dotnet5-internal to the packageSources." PackageSourcesNodeFooter="</packageSources>" PackageSourceTemplate="${TB}<add key=\"dotnet5-internal\" value=\"https://pkgs.dev.azure.com/dnceng/internal/_packaging/dotnet5-internal/nuget/v2\" />" sed -i.bak "s|$PackageSourcesNodeFooter|$PackageSourceTemplate${NL}$PackageSourcesNodeFooter|" $ConfigFile fi PackageSources+=('dotnet5-internal') grep -i "<add key=\"dotnet5-internal-transport\">" $ConfigFile if [ "$?" != "0" ]; then echo "Adding dotnet5-internal-transport to the packageSources." PackageSourcesNodeFooter="</packageSources>" PackageSourceTemplate="${TB}<add key=\"dotnet5-internal-transport\" value=\"https://pkgs.dev.azure.com/dnceng/internal/_packaging/dotnet5-internal-transport/nuget/v2\" />" sed -i.bak "s|$PackageSourcesNodeFooter|$PackageSourceTemplate${NL}$PackageSourcesNodeFooter|" $ConfigFile fi PackageSources+=('dotnet5-internal-transport') fi # Ensure dotnet6-internal and dotnet6-internal-transport are in the packageSources if the public dotnet6 feeds are present grep -i "<add key=\"dotnet6\"" $ConfigFile if [ "$?" == "0" ]; then grep -i "<add key=\"dotnet6-internal\"" $ConfigFile if [ "$?" != "0" ]; then echo "Adding dotnet6-internal to the packageSources." PackageSourcesNodeFooter="</packageSources>" PackageSourceTemplate="${TB}<add key=\"dotnet6-internal\" value=\"https://pkgs.dev.azure.com/dnceng/internal/_packaging/dotnet6-internal/nuget/v2\" />" sed -i.bak "s|$PackageSourcesNodeFooter|$PackageSourceTemplate${NL}$PackageSourcesNodeFooter|" $ConfigFile fi PackageSources+=('dotnet6-internal') grep -i "<add key=\"dotnet6-internal-transport\">" $ConfigFile if [ "$?" != "0" ]; then echo "Adding dotnet6-internal-transport to the packageSources." 
PackageSourcesNodeFooter="</packageSources>" PackageSourceTemplate="${TB}<add key=\"dotnet6-internal-transport\" value=\"https://pkgs.dev.azure.com/dnceng/internal/_packaging/dotnet6-internal-transport/nuget/v2\" />" sed -i.bak "s|$PackageSourcesNodeFooter|$PackageSourceTemplate${NL}$PackageSourcesNodeFooter|" $ConfigFile fi PackageSources+=('dotnet6-internal-transport') fi # I want things split line by line PrevIFS=$IFS IFS=$'\n' PackageSources+="$IFS" PackageSources+=$(grep -oh '"darc-int-[^"]*"' $ConfigFile | tr -d '"') IFS=$PrevIFS for FeedName in ${PackageSources[@]} ; do # Check if there is no existing credential for this FeedName grep -i "<$FeedName>" $ConfigFile if [ "$?" != "0" ]; then echo "Adding credentials for $FeedName." PackageSourceCredentialsNodeFooter="</packageSourceCredentials>" NewCredential="${TB}${TB}<$FeedName>${NL}<add key=\"Username\" value=\"dn-bot\" />${NL}<add key=\"ClearTextPassword\" value=\"$CredToken\" />${NL}</$FeedName>" sed -i.bak "s|$PackageSourceCredentialsNodeFooter|$NewCredential${NL}$PackageSourceCredentialsNodeFooter|" $ConfigFile fi done # Re-enable any entries in disabledPackageSources where the feed name contains darc-int grep -i "<disabledPackageSources>" $ConfigFile if [ "$?" == "0" ]; then DisabledDarcIntSources=() echo "Re-enabling any disabled \"darc-int\" package sources in $ConfigFile" DisabledDarcIntSources+=$(grep -oh '"darc-int-[^"]*" value="true"' $ConfigFile | tr -d '"') for DisabledSourceName in ${DisabledDarcIntSources[@]} ; do if [[ $DisabledSourceName == darc-int* ]] then OldDisableValue="<add key=\"$DisabledSourceName\" value=\"true\" />" NewDisableValue="<!-- Reenabled for build : $DisabledSourceName -->" sed -i.bak "s|$OldDisableValue|$NewDisableValue|" $ConfigFile echo "Neutralized disablePackageSources entry for '$DisabledSourceName'" fi done fi
#!/usr/bin/env bash # This file is a temporary workaround for internal builds to be able to restore from private AzDO feeds. # This file should be removed as part of this issue: https://github.com/dotnet/arcade/issues/4080 # # What the script does is iterate over all package sources in the pointed NuGet.config and add a credential entry # under <packageSourceCredentials> for each Maestro's managed private feed. Two additional credential # entries are also added for the two private static internal feeds: dotnet3-internal and dotnet3-internal-transport. # # This script needs to be called in every job that will restore packages and which the base repo has # private AzDO feeds in the NuGet.config. # # See example YAML call for this script below. Note the use of the variable `$(dn-bot-dnceng-artifact-feeds-rw)` # from the AzureDevOps-Artifact-Feeds-Pats variable group. # # Any disabledPackageSources entries which start with "darc-int" will be re-enabled as part of this script executing. # # - task: Bash@3 # displayName: Setup Private Feeds Credentials # inputs: # filePath: $(Build.SourcesDirectory)/eng/common/SetupNugetSources.sh # arguments: $(Build.SourcesDirectory)/NuGet.config $Token # condition: ne(variables['Agent.OS'], 'Windows_NT') # env: # Token: $(dn-bot-dnceng-artifact-feeds-rw) ConfigFile=$1 CredToken=$2 NL='\n' TB=' ' source="${BASH_SOURCE[0]}" # resolve $source until the file is no longer a symlink while [[ -h "$source" ]]; do scriptroot="$( cd -P "$( dirname "$source" )" && pwd )" source="$(readlink "$source")" # if $source was a relative symlink, we need to resolve it relative to the path where the # symlink file was located [[ $source != /* ]] && source="$scriptroot/$source" done scriptroot="$( cd -P "$( dirname "$source" )" && pwd )" . "$scriptroot/tools.sh" if [ ! -f "$ConfigFile" ]; then Write-PipelineTelemetryError -Category 'Build' "Error: Eng/common/SetupNugetSources.sh returned a non-zero exit code. Couldn't find the NuGet config file: $ConfigFile" ExitWithExitCode 1 fi if [ -z "$CredToken" ]; then Write-PipelineTelemetryError -category 'Build' "Error: Eng/common/SetupNugetSources.sh returned a non-zero exit code. Please supply a valid PAT" ExitWithExitCode 1 fi if [[ `uname -s` == "Darwin" ]]; then NL=$'\\\n' TB='' fi # Ensure there is a <packageSources>...</packageSources> section. grep -i "<packageSources>" $ConfigFile if [ "$?" != "0" ]; then echo "Adding <packageSources>...</packageSources> section." ConfigNodeHeader="<configuration>" PackageSourcesTemplate="${TB}<packageSources>${NL}${TB}</packageSources>" sed -i.bak "s|$ConfigNodeHeader|$ConfigNodeHeader${NL}$PackageSourcesTemplate|" $ConfigFile fi # Ensure there is a <packageSourceCredentials>...</packageSourceCredentials> section. grep -i "<packageSourceCredentials>" $ConfigFile if [ "$?" != "0" ]; then echo "Adding <packageSourceCredentials>...</packageSourceCredentials> section." PackageSourcesNodeFooter="</packageSources>" PackageSourceCredentialsTemplate="${TB}<packageSourceCredentials>${NL}${TB}</packageSourceCredentials>" sed -i.bak "s|$PackageSourcesNodeFooter|$PackageSourcesNodeFooter${NL}$PackageSourceCredentialsTemplate|" $ConfigFile fi PackageSources=() # Ensure dotnet3.1-internal and dotnet3.1-internal-transport are in the packageSources if the public dotnet3.1 feeds are present grep -i "<add key=\"dotnet3.1\"" $ConfigFile if [ "$?" == "0" ]; then grep -i "<add key=\"dotnet3.1-internal\"" $ConfigFile if [ "$?" != "0" ]; then echo "Adding dotnet3.1-internal to the packageSources." 
PackageSourcesNodeFooter="</packageSources>" PackageSourceTemplate="${TB}<add key=\"dotnet3.1-internal\" value=\"https://pkgs.dev.azure.com/dnceng/_packaging/dotnet3.1-internal/nuget/v2\" />" sed -i.bak "s|$PackageSourcesNodeFooter|$PackageSourceTemplate${NL}$PackageSourcesNodeFooter|" $ConfigFile fi PackageSources+=('dotnet3.1-internal') grep -i "<add key=\"dotnet3.1-internal-transport\">" $ConfigFile if [ "$?" != "0" ]; then echo "Adding dotnet3.1-internal-transport to the packageSources." PackageSourcesNodeFooter="</packageSources>" PackageSourceTemplate="${TB}<add key=\"dotnet3.1-internal-transport\" value=\"https://pkgs.dev.azure.com/dnceng/_packaging/dotnet3.1-internal-transport/nuget/v2\" />" sed -i.bak "s|$PackageSourcesNodeFooter|$PackageSourceTemplate${NL}$PackageSourcesNodeFooter|" $ConfigFile fi PackageSources+=('dotnet3.1-internal-transport') fi # Ensure dotnet5-internal and dotnet5-internal-transport are in the packageSources if the public dotnet5 feeds are present grep -i "<add key=\"dotnet5\"" $ConfigFile if [ "$?" == "0" ]; then grep -i "<add key=\"dotnet5-internal\"" $ConfigFile if [ "$?" != "0" ]; then echo "Adding dotnet5-internal to the packageSources." PackageSourcesNodeFooter="</packageSources>" PackageSourceTemplate="${TB}<add key=\"dotnet5-internal\" value=\"https://pkgs.dev.azure.com/dnceng/internal/_packaging/dotnet5-internal/nuget/v2\" />" sed -i.bak "s|$PackageSourcesNodeFooter|$PackageSourceTemplate${NL}$PackageSourcesNodeFooter|" $ConfigFile fi PackageSources+=('dotnet5-internal') grep -i "<add key=\"dotnet5-internal-transport\">" $ConfigFile if [ "$?" != "0" ]; then echo "Adding dotnet5-internal-transport to the packageSources." PackageSourcesNodeFooter="</packageSources>" PackageSourceTemplate="${TB}<add key=\"dotnet5-internal-transport\" value=\"https://pkgs.dev.azure.com/dnceng/internal/_packaging/dotnet5-internal-transport/nuget/v2\" />" sed -i.bak "s|$PackageSourcesNodeFooter|$PackageSourceTemplate${NL}$PackageSourcesNodeFooter|" $ConfigFile fi PackageSources+=('dotnet5-internal-transport') fi # Ensure dotnet6-internal and dotnet6-internal-transport are in the packageSources if the public dotnet6 feeds are present grep -i "<add key=\"dotnet6\"" $ConfigFile if [ "$?" == "0" ]; then grep -i "<add key=\"dotnet6-internal\"" $ConfigFile if [ "$?" != "0" ]; then echo "Adding dotnet6-internal to the packageSources." PackageSourcesNodeFooter="</packageSources>" PackageSourceTemplate="${TB}<add key=\"dotnet6-internal\" value=\"https://pkgs.dev.azure.com/dnceng/internal/_packaging/dotnet6-internal/nuget/v2\" />" sed -i.bak "s|$PackageSourcesNodeFooter|$PackageSourceTemplate${NL}$PackageSourcesNodeFooter|" $ConfigFile fi PackageSources+=('dotnet6-internal') grep -i "<add key=\"dotnet6-internal-transport\">" $ConfigFile if [ "$?" != "0" ]; then echo "Adding dotnet6-internal-transport to the packageSources." 
PackageSourcesNodeFooter="</packageSources>" PackageSourceTemplate="${TB}<add key=\"dotnet6-internal-transport\" value=\"https://pkgs.dev.azure.com/dnceng/internal/_packaging/dotnet6-internal-transport/nuget/v2\" />" sed -i.bak "s|$PackageSourcesNodeFooter|$PackageSourceTemplate${NL}$PackageSourcesNodeFooter|" $ConfigFile fi PackageSources+=('dotnet6-internal-transport') fi # I want things split line by line PrevIFS=$IFS IFS=$'\n' PackageSources+="$IFS" PackageSources+=$(grep -oh '"darc-int-[^"]*"' $ConfigFile | tr -d '"') IFS=$PrevIFS for FeedName in ${PackageSources[@]} ; do # Check if there is no existing credential for this FeedName grep -i "<$FeedName>" $ConfigFile if [ "$?" != "0" ]; then echo "Adding credentials for $FeedName." PackageSourceCredentialsNodeFooter="</packageSourceCredentials>" NewCredential="${TB}${TB}<$FeedName>${NL}<add key=\"Username\" value=\"dn-bot\" />${NL}<add key=\"ClearTextPassword\" value=\"$CredToken\" />${NL}</$FeedName>" sed -i.bak "s|$PackageSourceCredentialsNodeFooter|$NewCredential${NL}$PackageSourceCredentialsNodeFooter|" $ConfigFile fi done # Re-enable any entries in disabledPackageSources where the feed name contains darc-int grep -i "<disabledPackageSources>" $ConfigFile if [ "$?" == "0" ]; then DisabledDarcIntSources=() echo "Re-enabling any disabled \"darc-int\" package sources in $ConfigFile" DisabledDarcIntSources+=$(grep -oh '"darc-int-[^"]*" value="true"' $ConfigFile | tr -d '"') for DisabledSourceName in ${DisabledDarcIntSources[@]} ; do if [[ $DisabledSourceName == darc-int* ]] then OldDisableValue="<add key=\"$DisabledSourceName\" value=\"true\" />" NewDisableValue="<!-- Reenabled for build : $DisabledSourceName -->" sed -i.bak "s|$OldDisableValue|$NewDisableValue|" $ConfigFile echo "Neutralized disablePackageSources entry for '$DisabledSourceName'" fi done fi
-1
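The eng/common shell script captured in the record above adds internal NuGet feeds by grepping NuGet.config for an existing <add key=...> entry and, when it is missing, sed-substituting a new entry immediately before the closing </packageSources> node (and the matching credential block before </packageSourceCredentials>). Below is a minimal sketch of that grep-then-sed pattern, assuming GNU sed; the feed name, URL, and config path are illustrative placeholders, not values taken from the record.

#!/usr/bin/env bash
# Hedged sketch only: feed name, URL and config path are hypothetical.
ConfigFile="NuGet.config"
FeedName="example-internal"                  # hypothetical feed name
FeedUrl="https://example.invalid/nuget/v2"   # hypothetical feed URL
Footer="</packageSources>"
Entry="    <add key=\"$FeedName\" value=\"$FeedUrl\" />"
if ! grep -qi "<add key=\"$FeedName\"" "$ConfigFile"; then
  # GNU sed treats \n in the replacement as a newline, so the new entry
  # lands on its own line just above the closing node.
  sed -i.bak "s|$Footer|$Entry\n$Footer|" "$ConfigFile"
fi

The same check-before-insert shape keeps the script idempotent: rerunning it against an already patched NuGet.config adds nothing.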
dotnet/runtime
66,477
Add armv8l virtualized 32-bit ARM core detection
## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse the architecture as `arm` when the output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812) Arm support on source-build is at https://github.com/dotnet/installer/pull/13378 Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782
ayakael
2022-03-10T21:05:11Z
2022-03-11T09:29:12Z
f82fe1d83f1a0e22025e186fdc6f4d86de0a83a1
12816261feee53cc57fab17f088ee6544ef6ffdb
Add armv8l virtualized 32-bit ARM core detection. ## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse the architecture as `arm` when the output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812) Arm support on source-build is at https://github.com/dotnet/installer/pull/13378 Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782
./src/mono/mono/tests/verifier/make_cmmp_test.sh
#! /bin/sh SED="sed" if [ `which gsed 2> /dev/null` ]; then SED="gsed" fi TEST_NAME=$1 TEST_VALIDITY=$2 TEST_OP=$3 TEST_LOAD_OP=$4 TEST_TYPE=$5 TEST_BEFORE_OP=$6 echo $TEST_OP | grep unbox > /dev/null; if [ $? -eq 0 ]; then TEST_CODE=" ldloc.0 box $TEST_TYPE"; else TEST_CODE=" ldc.i4.1 newarr $TEST_TYPE ldc.i4.0"; fi TEST_FILE=`echo ${TEST_VALIDITY}_${TEST_NAME} | $SED -e 's/ /_/g' -e 's/\./_/g' -e 's/&/mp/g' -e 's/\[/_/g' -e 's/\]/_/g'`_generated.il echo $TEST_FILE $SED -e "s/TYPE/${TEST_TYPE}/g" -e "s/OPCODE/${TEST_OP}/g" -e "s/BEFORE_OP/${TEST_BEFORE_OP}/g" -e "s/LOAD_OP/${TEST_LOAD_OP}/g"> $TEST_FILE <<//EOF .assembly extern mscorlib { .ver 2:0:0:0 .publickeytoken = (B7 7A 5C 56 19 34 E0 89 ) // .z\V.4.. } .assembly 'cmmp_test' { .hash algorithm 0x00008004 .ver 0:0:0:0 } .module cmmp.exe .class ClassA extends [mscorlib]System.Object { .field public int32 valid .method public hidebysig specialname rtspecialname instance default void .ctor (int32&) cil managed { .maxstack 8 ldarg.0 call instance void object::.ctor() ret } .method public virtual void VirtTest (ClassA& arg) { ret } } .class public Template\`1<T> extends [mscorlib]System.Object { } .class sealed public StructTemplate\`1<T> extends [mscorlib]System.ValueType { .field public !0 t } .class sealed public StructTemplate2\`1<T> extends [mscorlib]System.ValueType { .field public !0 t } .class public auto ansi sealed MyStruct extends [mscorlib]System.ValueType { .field public int32 foo .field public native int ptr .method public static void Test (MyStruct& arg) { ret } } .class public auto ansi sealed MyEnum extends [mscorlib]System.Enum { .field public specialname rtspecialname int32 value__ .field public static literal valuetype MyEnum B = int32(0x00000000) .field public static literal valuetype MyEnum C = int32(0x00000001) } .method public static int32 Main () { .entrypoint .maxstack 8 .locals init (TYPE V_0) BEFORE_OP ${TEST_CODE} OPCODE LOAD_OP leave END END: ldc.i4.0 ret } //EOF
#! /bin/sh SED="sed" if [ `which gsed 2> /dev/null` ]; then SED="gsed" fi TEST_NAME=$1 TEST_VALIDITY=$2 TEST_OP=$3 TEST_LOAD_OP=$4 TEST_TYPE=$5 TEST_BEFORE_OP=$6 echo $TEST_OP | grep unbox > /dev/null; if [ $? -eq 0 ]; then TEST_CODE=" ldloc.0 box $TEST_TYPE"; else TEST_CODE=" ldc.i4.1 newarr $TEST_TYPE ldc.i4.0"; fi TEST_FILE=`echo ${TEST_VALIDITY}_${TEST_NAME} | $SED -e 's/ /_/g' -e 's/\./_/g' -e 's/&/mp/g' -e 's/\[/_/g' -e 's/\]/_/g'`_generated.il echo $TEST_FILE $SED -e "s/TYPE/${TEST_TYPE}/g" -e "s/OPCODE/${TEST_OP}/g" -e "s/BEFORE_OP/${TEST_BEFORE_OP}/g" -e "s/LOAD_OP/${TEST_LOAD_OP}/g"> $TEST_FILE <<//EOF .assembly extern mscorlib { .ver 2:0:0:0 .publickeytoken = (B7 7A 5C 56 19 34 E0 89 ) // .z\V.4.. } .assembly 'cmmp_test' { .hash algorithm 0x00008004 .ver 0:0:0:0 } .module cmmp.exe .class ClassA extends [mscorlib]System.Object { .field public int32 valid .method public hidebysig specialname rtspecialname instance default void .ctor (int32&) cil managed { .maxstack 8 ldarg.0 call instance void object::.ctor() ret } .method public virtual void VirtTest (ClassA& arg) { ret } } .class public Template\`1<T> extends [mscorlib]System.Object { } .class sealed public StructTemplate\`1<T> extends [mscorlib]System.ValueType { .field public !0 t } .class sealed public StructTemplate2\`1<T> extends [mscorlib]System.ValueType { .field public !0 t } .class public auto ansi sealed MyStruct extends [mscorlib]System.ValueType { .field public int32 foo .field public native int ptr .method public static void Test (MyStruct& arg) { ret } } .class public auto ansi sealed MyEnum extends [mscorlib]System.Enum { .field public specialname rtspecialname int32 value__ .field public static literal valuetype MyEnum B = int32(0x00000000) .field public static literal valuetype MyEnum C = int32(0x00000001) } .method public static int32 Main () { .entrypoint .maxstack 8 .locals init (TYPE V_0) BEFORE_OP ${TEST_CODE} OPCODE LOAD_OP leave END END: ldc.i4.0 ret } //EOF
-1
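As the pull-request description in these records explains, the change teaches the build scripts to treat `armv8l` (a 64-bit ARM core running a 32-bit userspace) the same way as `armv7l`. The sketch below shows what that kind of `uname -m` mapping looks like as a plain shell case statement; the variable names and the exact set of patterns are illustrative rather than the repository's actual detection code.

#!/usr/bin/env bash
# Hedged sketch: map the machine name reported by uname -m onto a build architecture.
CPUName=$(uname -m)
case "$CPUName" in
  armv7l|armv8l)     # armv8l is handled like armv7l, as the PR description states
    arch=arm
    ;;
  aarch64|arm64)
    arch=arm64
    ;;
  x86_64|amd64)
    arch=x64
    ;;
  *)
    echo "Unknown CPU '$CPUName', defaulting to x64" >&2
    arch=x64
    ;;
esac
echo "$arch"

Grouping `armv8l` with `armv7l` in the same case arm is the whole change in spirit: both report a 32-bit ARM userspace, so both should build as `arm`.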
dotnet/runtime
66,477
Add armv8l virtualized 32-bit ARM core detection
## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse the architecture as `arm` when the output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812) Arm support on source-build is at https://github.com/dotnet/installer/pull/13378 Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782
ayakael
2022-03-10T21:05:11Z
2022-03-11T09:29:12Z
f82fe1d83f1a0e22025e186fdc6f4d86de0a83a1
12816261feee53cc57fab17f088ee6544ef6ffdb
Add armv8l virtualized 32-bit ARM core detection. ## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse the architecture as `arm` when the output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812) Arm support on source-build is at https://github.com/dotnet/installer/pull/13378 Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782
./src/mono/cmake/QuietOSXRanlib.cmake
if((CMAKE_SYSTEM_NAME STREQUAL "Darwin") OR (CMAKE_SYSTEM_NAME STREQUAL "iOS")) # Quiet 'file ... has no symbols' messages from ranlib find_program(CMAKE_XCRUN NAMES xcrun) execute_process(COMMAND ${CMAKE_XCRUN} -find libtool OUTPUT_VARIABLE CMAKE_LIBTOOL OUTPUT_STRIP_TRAILING_WHITESPACE) get_property(languages GLOBAL PROPERTY ENABLED_LANGUAGES) # -D makes ranlib deterministic set(RANLIB_FLAGS "-D -no_warning_for_no_symbols") foreach(lang ${languages}) set(CMAKE_${lang}_CREATE_STATIC_LIBRARY "\"${CMAKE_LIBTOOL}\" -static ${RANLIB_FLAGS} -o <TARGET> <LINK_FLAGS> <OBJECTS>") endforeach() # Another instance set(MONO_RANLIB "${PROJECT_BINARY_DIR}/mono-ranlib") file(WRITE ${MONO_RANLIB} "#!/bin/sh\n") file(APPEND ${MONO_RANLIB} "${CMAKE_RANLIB} ${RANLIB_FLAGS} $*") execute_process(COMMAND chmod a+x ${MONO_RANLIB}) set(CMAKE_RANLIB "${MONO_RANLIB}") endif()
if((CMAKE_SYSTEM_NAME STREQUAL "Darwin") OR (CMAKE_SYSTEM_NAME STREQUAL "iOS")) # Quiet 'file ... has no symbols' messages from ranlib find_program(CMAKE_XCRUN NAMES xcrun) execute_process(COMMAND ${CMAKE_XCRUN} -find libtool OUTPUT_VARIABLE CMAKE_LIBTOOL OUTPUT_STRIP_TRAILING_WHITESPACE) get_property(languages GLOBAL PROPERTY ENABLED_LANGUAGES) # -D makes ranlib deterministic set(RANLIB_FLAGS "-D -no_warning_for_no_symbols") foreach(lang ${languages}) set(CMAKE_${lang}_CREATE_STATIC_LIBRARY "\"${CMAKE_LIBTOOL}\" -static ${RANLIB_FLAGS} -o <TARGET> <LINK_FLAGS> <OBJECTS>") endforeach() # Another instance set(MONO_RANLIB "${PROJECT_BINARY_DIR}/mono-ranlib") file(WRITE ${MONO_RANLIB} "#!/bin/sh\n") file(APPEND ${MONO_RANLIB} "${CMAKE_RANLIB} ${RANLIB_FLAGS} $*") execute_process(COMMAND chmod a+x ${MONO_RANLIB}) set(CMAKE_RANLIB "${MONO_RANLIB}") endif()
-1
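The QuietOSXRanlib.cmake record above silences "file ... has no symbols" warnings by generating a tiny shell shim that forwards to the real ranlib with extra flags, then pointing CMAKE_RANLIB at that shim. A sketch of what such a generated wrapper looks like follows; /path/to/ranlib is a placeholder for whatever ${CMAKE_RANLIB} would expand to at configure time, not a real path.

#!/bin/sh
# Hedged sketch of the generated ranlib wrapper.
# -D makes the archive index deterministic; -no_warning_for_no_symbols
# suppresses the "has no symbols" noise on Apple toolchains.
exec /path/to/ranlib -D -no_warning_for_no_symbols "$@"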
dotnet/runtime
66,477
Add armv8l virtualized 32-bit ARM core detection
## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse the architecture as `arm` when the output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812) Arm support on source-build is at https://github.com/dotnet/installer/pull/13378 Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782
ayakael
2022-03-10T21:05:11Z
2022-03-11T09:29:12Z
f82fe1d83f1a0e22025e186fdc6f4d86de0a83a1
12816261feee53cc57fab17f088ee6544ef6ffdb
Add armv8l virtualized 32-bit ARM core detection. ## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse the architecture as `arm` when the output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812) Arm support on source-build is at https://github.com/dotnet/installer/pull/13378 Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782
./src/mono/mono/tests/verifier/make_bad_op_test.sh
#! /bin/sh SED="sed" if [ `which gsed 2> /dev/null` ]; then SED="gsed" fi TEST_NAME=$1 TEST_VALIDITY=$2 TEST_BYTE_0=$3 TEST_BYTE_1=$4 if [ "x$TEST_BYTE_1" = "x" ]; then TEST_BYTE_1="0"; fi TEST_FILE=`echo ${TEST_VALIDITY}_${TEST_NAME} | $SED -e 's/ /_/g' -e 's/\./_/g' -e 's/&/mp/g' -e 's/\[/_/g' -e 's/\]/_/g'`_generated.il echo $TEST_FILE $SED -e "s/VALIDITY/${TEST_VALIDITY}/g" -e "s/BYTE_0/${TEST_BYTE_0}/g" -e "s/BYTE_1/${TEST_BYTE_1}/g" > $TEST_FILE <<//EOF // VALIDITY CIL which breaks the ECMA-335 rules. // this CIL should fail verification by a conforming CLI verifier. .assembly '${TEST_NAME}_generated' { .hash algorithm 0x00008004 .ver 0:0:0:0 } .method public static int32 Main() cil managed { .entrypoint .maxstack 2 .locals init () .emitbyte BYTE_0 .emitbyte BYTE_1 leave end end: ldc.i4.0 ret } //EOF
#! /bin/sh SED="sed" if [ `which gsed 2> /dev/null` ]; then SED="gsed" fi TEST_NAME=$1 TEST_VALIDITY=$2 TEST_BYTE_0=$3 TEST_BYTE_1=$4 if [ "x$TEST_BYTE_1" = "x" ]; then TEST_BYTE_1="0"; fi TEST_FILE=`echo ${TEST_VALIDITY}_${TEST_NAME} | $SED -e 's/ /_/g' -e 's/\./_/g' -e 's/&/mp/g' -e 's/\[/_/g' -e 's/\]/_/g'`_generated.il echo $TEST_FILE $SED -e "s/VALIDITY/${TEST_VALIDITY}/g" -e "s/BYTE_0/${TEST_BYTE_0}/g" -e "s/BYTE_1/${TEST_BYTE_1}/g" > $TEST_FILE <<//EOF // VALIDITY CIL which breaks the ECMA-335 rules. // this CIL should fail verification by a conforming CLI verifier. .assembly '${TEST_NAME}_generated' { .hash algorithm 0x00008004 .ver 0:0:0:0 } .method public static int32 Main() cil managed { .entrypoint .maxstack 2 .locals init () .emitbyte BYTE_0 .emitbyte BYTE_1 leave end end: ldc.i4.0 ret } //EOF
-1
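The two test-generator records above (make_cmmp_test.sh and make_bad_op_test.sh) share one pattern: sed rewrites placeholder tokens in an IL template fed in through a heredoc, and the result is written to a *_generated.il file. Here is a stripped-down sketch of that pattern with hypothetical placeholder and file names; it assumes the substituted type name contains no '/' characters.

#!/bin/sh
# Hedged sketch of the generator pattern: substitute placeholders in a heredoc template.
TEST_NAME=$1
TEST_TYPE=$2
TEST_FILE="${TEST_NAME}_generated.il"   # illustrative naming only
sed -e "s/TYPE/${TEST_TYPE}/g" > "$TEST_FILE" <<EOF
.method public static int32 Main() cil managed {
    .entrypoint
    .maxstack 2
    .locals init (TYPE V_0)
    ldc.i4.0
    ret
}
EOF
echo "$TEST_FILE"

Driving the template through sed keeps one script able to emit many valid/invalid test permutations from the same IL skeleton.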
dotnet/runtime
66,477
Add armv8l virtualized 32-bit ARM core detection
## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse the architecture as `arm` when the output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812) Arm support on source-build is at https://github.com/dotnet/installer/pull/13378 Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782
ayakael
2022-03-10T21:05:11Z
2022-03-11T09:29:12Z
f82fe1d83f1a0e22025e186fdc6f4d86de0a83a1
12816261feee53cc57fab17f088ee6544ef6ffdb
Add armv8l virtualized 32-bit ARM core detection. ## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse the architecture as `arm` when the output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812) Arm support on source-build is at https://github.com/dotnet/installer/pull/13378 Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782
./eng/common/native/init-compiler.sh
#!/usr/bin/env bash # # This file detects the C/C++ compiler and exports it to the CC/CXX environment variables # # NOTE: some scripts source this file and rely on stdout being empty, make sure to not output anything here! if [[ "$#" -lt 3 ]]; then echo "Usage..." echo "init-compiler.sh <script directory> <Architecture> <compiler>" echo "Specify the script directory." echo "Specify the target architecture." echo "Specify the name of compiler (clang or gcc)." exit 1 fi nativescriptroot="$1" build_arch="$2" compiler="$3" case "$compiler" in clang*|-clang*|--clang*) # clangx.y or clang-x.y version="$(echo "$compiler" | tr -d '[:alpha:]-=')" parts=(${version//./ }) majorVersion="${parts[0]}" minorVersion="${parts[1]}" if [[ -z "$minorVersion" && "$majorVersion" -le 6 ]]; then minorVersion=0; fi compiler=clang ;; gcc*|-gcc*|--gcc*) # gccx.y or gcc-x.y version="$(echo "$compiler" | tr -d '[:alpha:]-=')" parts=(${version//./ }) majorVersion="${parts[0]}" minorVersion="${parts[1]}" compiler=gcc ;; esac cxxCompiler="$compiler++" . "$nativescriptroot"/../pipeline-logging-functions.sh # clear the existing CC and CXX from environment CC= CXX= LDFLAGS= if [[ "$compiler" == "gcc" ]]; then cxxCompiler="g++"; fi check_version_exists() { desired_version=-1 # Set up the environment to be used for building with the desired compiler. if command -v "$compiler-$1.$2" > /dev/null; then desired_version="-$1.$2" elif command -v "$compiler$1$2" > /dev/null; then desired_version="$1$2" elif command -v "$compiler-$1$2" > /dev/null; then desired_version="-$1$2" fi echo "$desired_version" } if [[ -z "$CLR_CC" ]]; then # Set default versions if [[ -z "$majorVersion" ]]; then # note: gcc (all versions) and clang versions higher than 6 do not have minor version in file name, if it is zero. if [[ "$compiler" == "clang" ]]; then versions=( 13 12 11 10 9 8 7 6.0 5.0 4.0 3.9 3.8 3.7 3.6 3.5 ) elif [[ "$compiler" == "gcc" ]]; then versions=( 12 11 10 9 8 7 6 5 4.9 ); fi for version in "${versions[@]}"; do parts=(${version//./ }) desired_version="$(check_version_exists "${parts[0]}" "${parts[1]}")" if [[ "$desired_version" != "-1" ]]; then majorVersion="${parts[0]}"; break; fi done if [[ -z "$majorVersion" ]]; then if command -v "$compiler" > /dev/null; then if [[ "$(uname)" != "Darwin" ]]; then Write-PipelineTelemetryError -category "Build" -type "warning" "Specific version of $compiler not found, falling back to use the one in PATH." fi CC="$(command -v "$compiler")" CXX="$(command -v "$cxxCompiler")" else Write-PipelineTelemetryError -category "Build" "No usable version of $compiler found." exit 1 fi else if [[ "$compiler" == "clang" && "$majorVersion" -lt 5 ]]; then if [[ "$build_arch" == "arm" || "$build_arch" == "armel" ]]; then if command -v "$compiler" > /dev/null; then Write-PipelineTelemetryError -category "Build" -type "warning" "Found clang version $majorVersion which is not supported on arm/armel architectures, falling back to use clang from PATH." CC="$(command -v "$compiler")" CXX="$(command -v "$cxxCompiler")" else Write-PipelineTelemetryError -category "Build" "Found clang version $majorVersion which is not supported on arm/armel architectures, and there is no clang in PATH." exit 1 fi fi fi fi else desired_version="$(check_version_exists "$majorVersion" "$minorVersion")" if [[ "$desired_version" == "-1" ]]; then Write-PipelineTelemetryError -category "Build" "Could not find specific version of $compiler: $majorVersion $minorVersion." 
exit 1 fi fi if [[ -z "$CC" ]]; then CC="$(command -v "$compiler$desired_version")" CXX="$(command -v "$cxxCompiler$desired_version")" if [[ -z "$CXX" ]]; then CXX="$(command -v "$cxxCompiler")"; fi fi else if [[ ! -f "$CLR_CC" ]]; then Write-PipelineTelemetryError -category "Build" "CLR_CC is set but path '$CLR_CC' does not exist" exit 1 fi CC="$CLR_CC" CXX="$CLR_CXX" fi if [[ -z "$CC" ]]; then Write-PipelineTelemetryError -category "Build" "Unable to find $compiler." exit 1 fi # Only lld version >= 9 can be considered stable if [[ "$compiler" == "clang" && "$majorVersion" -ge 9 ]]; then if "$CC" -fuse-ld=lld -Wl,--version >/dev/null 2>&1; then LDFLAGS="-fuse-ld=lld" fi fi SCAN_BUILD_COMMAND="$(command -v "scan-build$desired_version")" export CC CXX LDFLAGS SCAN_BUILD_COMMAND
#!/usr/bin/env bash # # This file detects the C/C++ compiler and exports it to the CC/CXX environment variables # # NOTE: some scripts source this file and rely on stdout being empty, make sure to not output anything here! if [[ "$#" -lt 3 ]]; then echo "Usage..." echo "init-compiler.sh <script directory> <Architecture> <compiler>" echo "Specify the script directory." echo "Specify the target architecture." echo "Specify the name of compiler (clang or gcc)." exit 1 fi nativescriptroot="$1" build_arch="$2" compiler="$3" case "$compiler" in clang*|-clang*|--clang*) # clangx.y or clang-x.y version="$(echo "$compiler" | tr -d '[:alpha:]-=')" parts=(${version//./ }) majorVersion="${parts[0]}" minorVersion="${parts[1]}" if [[ -z "$minorVersion" && "$majorVersion" -le 6 ]]; then minorVersion=0; fi compiler=clang ;; gcc*|-gcc*|--gcc*) # gccx.y or gcc-x.y version="$(echo "$compiler" | tr -d '[:alpha:]-=')" parts=(${version//./ }) majorVersion="${parts[0]}" minorVersion="${parts[1]}" compiler=gcc ;; esac cxxCompiler="$compiler++" . "$nativescriptroot"/../pipeline-logging-functions.sh # clear the existing CC and CXX from environment CC= CXX= LDFLAGS= if [[ "$compiler" == "gcc" ]]; then cxxCompiler="g++"; fi check_version_exists() { desired_version=-1 # Set up the environment to be used for building with the desired compiler. if command -v "$compiler-$1.$2" > /dev/null; then desired_version="-$1.$2" elif command -v "$compiler$1$2" > /dev/null; then desired_version="$1$2" elif command -v "$compiler-$1$2" > /dev/null; then desired_version="-$1$2" fi echo "$desired_version" } if [[ -z "$CLR_CC" ]]; then # Set default versions if [[ -z "$majorVersion" ]]; then # note: gcc (all versions) and clang versions higher than 6 do not have minor version in file name, if it is zero. if [[ "$compiler" == "clang" ]]; then versions=( 13 12 11 10 9 8 7 6.0 5.0 4.0 3.9 3.8 3.7 3.6 3.5 ) elif [[ "$compiler" == "gcc" ]]; then versions=( 12 11 10 9 8 7 6 5 4.9 ); fi for version in "${versions[@]}"; do parts=(${version//./ }) desired_version="$(check_version_exists "${parts[0]}" "${parts[1]}")" if [[ "$desired_version" != "-1" ]]; then majorVersion="${parts[0]}"; break; fi done if [[ -z "$majorVersion" ]]; then if command -v "$compiler" > /dev/null; then if [[ "$(uname)" != "Darwin" ]]; then Write-PipelineTelemetryError -category "Build" -type "warning" "Specific version of $compiler not found, falling back to use the one in PATH." fi CC="$(command -v "$compiler")" CXX="$(command -v "$cxxCompiler")" else Write-PipelineTelemetryError -category "Build" "No usable version of $compiler found." exit 1 fi else if [[ "$compiler" == "clang" && "$majorVersion" -lt 5 ]]; then if [[ "$build_arch" == "arm" || "$build_arch" == "armel" ]]; then if command -v "$compiler" > /dev/null; then Write-PipelineTelemetryError -category "Build" -type "warning" "Found clang version $majorVersion which is not supported on arm/armel architectures, falling back to use clang from PATH." CC="$(command -v "$compiler")" CXX="$(command -v "$cxxCompiler")" else Write-PipelineTelemetryError -category "Build" "Found clang version $majorVersion which is not supported on arm/armel architectures, and there is no clang in PATH." exit 1 fi fi fi fi else desired_version="$(check_version_exists "$majorVersion" "$minorVersion")" if [[ "$desired_version" == "-1" ]]; then Write-PipelineTelemetryError -category "Build" "Could not find specific version of $compiler: $majorVersion $minorVersion." 
exit 1 fi fi if [[ -z "$CC" ]]; then CC="$(command -v "$compiler$desired_version")" CXX="$(command -v "$cxxCompiler$desired_version")" if [[ -z "$CXX" ]]; then CXX="$(command -v "$cxxCompiler")"; fi fi else if [[ ! -f "$CLR_CC" ]]; then Write-PipelineTelemetryError -category "Build" "CLR_CC is set but path '$CLR_CC' does not exist" exit 1 fi CC="$CLR_CC" CXX="$CLR_CXX" fi if [[ -z "$CC" ]]; then Write-PipelineTelemetryError -category "Build" "Unable to find $compiler." exit 1 fi # Only lld version >= 9 can be considered stable if [[ "$compiler" == "clang" && "$majorVersion" -ge 9 ]]; then if "$CC" -fuse-ld=lld -Wl,--version >/dev/null 2>&1; then LDFLAGS="-fuse-ld=lld" fi fi SCAN_BUILD_COMMAND="$(command -v "scan-build$desired_version")" export CC CXX LDFLAGS SCAN_BUILD_COMMAND
-1
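init-compiler.sh, captured in the preceding record, probes for a versioned compiler binary by trying several naming conventions in turn (clang-13.0, clang13, clang-13 style names) before falling back to whatever is on PATH. The condensed sketch below keeps the probing function from the record but simplifies the surrounding fallback logic; the version list and the final echo are illustrative.

#!/usr/bin/env bash
# Hedged sketch of the version-probing pattern: return the name suffix that
# resolves to an existing compiler binary, or -1 if no usual spelling exists.
compiler=clang          # illustrative; the record also handles gcc
check_version_exists() {
  desired_version=-1
  if command -v "$compiler-$1.$2" > /dev/null; then
    desired_version="-$1.$2"      # versioned name like clang-13.0
  elif command -v "$compiler$1$2" > /dev/null; then
    desired_version="$1$2"        # versioned name like clang13
  elif command -v "$compiler-$1$2" > /dev/null; then
    desired_version="-$1$2"       # versioned name like clang-13
  fi
  echo "$desired_version"
}
# Usage sketch: pick the newest of a few candidate major versions.
for v in 13 12 11; do
  suffix=$(check_version_exists "$v" "")
  if [ "$suffix" != "-1" ]; then
    CC="$compiler$suffix"
    break
  fi
done
echo "${CC:-no versioned $compiler found}"

Probing by name rather than asking the compiler for --version keeps the script silent, which matters because other scripts source it and expect empty stdout.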
dotnet/runtime
66,477
Add armv8l virtualized 32-bit ARM core detection
## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse the architecture as `arm` when the output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812) Arm support on source-build is at https://github.com/dotnet/installer/pull/13378 Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782
ayakael
2022-03-10T21:05:11Z
2022-03-11T09:29:12Z
f82fe1d83f1a0e22025e186fdc6f4d86de0a83a1
12816261feee53cc57fab17f088ee6544ef6ffdb
Add armv8l virtualized 32-bit ARM core detection. ## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse the architecture as `arm` when the output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812) Arm support on source-build is at https://github.com/dotnet/installer/pull/13378 Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782
./src/coreclr/pal/tools/setup-ubuntuvm.sh
#!/usr/bin/env bash echo Installing basic Ubuntu \(VM\) XPlat environment function Install-Packages { echo Installing Packages apt-get install clang -y apt-get install cmake -y } function Enable-Integration-Services { echo Checking for integration services res=$(grep -c "hv_vmbus" /etc/initramfs-tools/modules) if [ $res -eq 0 ] then echo Installing integration services echo hv_vmbus >> /etc/initramfs-tools/modules echo hv_storvsc >> /etc/initramfs-tools/modules echo hv_blkvsc >> /etc/initramfs-tools/modules echo hv_netvsc >> /etc/initramfs-tools/modules else echo Integration Services already installed fi } Install-Packages Enable-Integration-Services
#!/usr/bin/env bash echo Installing basic Ubuntu \(VM\) XPlat environment function Install-Packages { echo Installing Packages apt-get install clang -y apt-get install cmake -y } function Enable-Integration-Services { echo Checking for integration services res=$(grep -c "hv_vmbus" /etc/initramfs-tools/modules) if [ $res -eq 0 ] then echo Installing integration services echo hv_vmbus >> /etc/initramfs-tools/modules echo hv_storvsc >> /etc/initramfs-tools/modules echo hv_blkvsc >> /etc/initramfs-tools/modules echo hv_netvsc >> /etc/initramfs-tools/modules else echo Integration Services already installed fi } Install-Packages Enable-Integration-Services
-1
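The setup-ubuntuvm.sh record above enables Hyper-V integration services by appending the hv_* kernel modules to /etc/initramfs-tools/modules, but only when grep shows they are not listed yet. A small sketch of that idempotent-append pattern follows, using a helper name and a single illustrative call rather than the record's exact module list; it assumes the target file already exists and that the script runs with permission to write it.

#!/usr/bin/env bash
# Hedged sketch: append a line to a config file only if it is not already present.
append_once() {
  file="$1"
  entry="$2"
  if [ "$(grep -c "$entry" "$file")" -eq 0 ]; then
    echo "$entry" >> "$file"
    echo "Added $entry to $file"
  else
    echo "$entry already present in $file"
  fi
}
# Illustrative call; the record appends hv_vmbus, hv_storvsc, hv_blkvsc and hv_netvsc.
append_once /etc/initramfs-tools/modules hv_vmbus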
dotnet/runtime
66,477
Add armv8l virtualized 32-bit ARM core detection
## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse the architecture as `arm` when the output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812) Arm support on source-build is at https://github.com/dotnet/installer/pull/13378 Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782
ayakael
2022-03-10T21:05:11Z
2022-03-11T09:29:12Z
f82fe1d83f1a0e22025e186fdc6f4d86de0a83a1
12816261feee53cc57fab17f088ee6544ef6ffdb
Add armv8l virtualized 32-bit ARM core detection. ## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse the architecture as `arm` when the output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812) Arm support on source-build is at https://github.com/dotnet/installer/pull/13378 Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782
./eng/native/functions.cmake
function(clr_unknown_arch) if (WIN32) message(FATAL_ERROR "Only AMD64, ARM64, ARM and I386 are supported. Found: ${CMAKE_SYSTEM_PROCESSOR}") elseif(CLR_CROSS_COMPONENTS_BUILD) message(FATAL_ERROR "Only AMD64, I386 host are supported for linux cross-architecture component. Found: ${CMAKE_SYSTEM_PROCESSOR}") else() message(FATAL_ERROR "Only AMD64, ARMV6, ARM64, LOONGARCH64 and ARM are supported. Found: ${CMAKE_SYSTEM_PROCESSOR}") endif() endfunction() # C to MASM include file translator # This is replacement for the deprecated h2inc tool that used to be part of VS. function(h2inc filename output) file(STRINGS ${filename} lines) get_filename_component(path "${filename}" DIRECTORY) file(RELATIVE_PATH relative_filename "${CLR_REPO_ROOT_DIR}" "${filename}") file(WRITE "${output}" "// File start: ${relative_filename}\n") # Use of NEWLINE_CONSUME is needed for lines with trailing backslash file(STRINGS ${filename} contents NEWLINE_CONSUME) string(REGEX REPLACE "\\\\\n" "\\\\\\\\ \n" contents "${contents}") string(REGEX REPLACE "\n" ";" lines "${contents}") foreach(line IN LISTS lines) string(REGEX REPLACE "\\\\\\\\ " "\\\\" line "${line}") if(line MATCHES "^ *# pragma") # Ignore pragmas continue() endif() if(line MATCHES "^ *# *include *\"(.*)\"") # Expand includes. h2inc("${path}/${CMAKE_MATCH_1}" "${output}") continue() endif() if(line MATCHES "^ *#define +([0-9A-Za-z_()]+) *(.*)") # Augment #defines with their MASM equivalent set(name "${CMAKE_MATCH_1}") set(value "${CMAKE_MATCH_2}") # Note that we do not handle multiline constants # Strip comments from value string(REGEX REPLACE "//.*" "" value "${value}") string(REGEX REPLACE "/\\*.*\\*/" "" value "${value}") # Strip whitespaces from value string(REPLACE " +$" "" value "${value}") # ignore #defines with arguments if(NOT "${name}" MATCHES "\\(") set(HEX_NUMBER_PATTERN "0x([0-9A-Fa-f]+)") set(DECIMAL_NUMBER_PATTERN "(-?[0-9]+)") if("${value}" MATCHES "${HEX_NUMBER_PATTERN}") string(REGEX REPLACE "${HEX_NUMBER_PATTERN}" "0\\1h" value "${value}") # Convert hex constants file(APPEND "${output}" "${name} EQU ${value}\n") elseif("${value}" MATCHES "${DECIMAL_NUMBER_PATTERN}" AND (NOT "${value}" MATCHES "[G-Zg-z]+" OR "${value}" MATCHES "\\(")) string(REGEX REPLACE "${DECIMAL_NUMBER_PATTERN}" "\\1t" value "${value}") # Convert dec constants file(APPEND "${output}" "${name} EQU ${value}\n") else() file(APPEND "${output}" "${name} TEXTEQU <${value}>\n") endif() endif() endif() file(APPEND "${output}" "${line}\n") endforeach() file(APPEND "${output}" "// File end: ${relative_filename}\n") endfunction() # Build a list of compiler definitions by putting -D in front of each define. function(get_compile_definitions DefinitionName) # Get the current list of definitions get_directory_property(COMPILE_DEFINITIONS_LIST COMPILE_DEFINITIONS) # The entries that contain generator expressions must have the -D inside of the # expression. So we transform e.g. $<$<CONFIG:Debug>:_DEBUG> to $<$<CONFIG:Debug>:-D_DEBUG> # CMake's support for multiple values within a single generator expression is somewhat ad-hoc. # Since we have a number of complex generator expressions, we use them with multiple values to ensure that # we don't forget to update all of the generator expressions if one needs to be updated. # As a result, we need to expand out the multi-valued generator expression to wrap each individual value here. # Otherwise, CMake will fail to expand it. 
set(LastGeneratorExpression "") foreach(DEFINITION IN LISTS COMPILE_DEFINITIONS_LIST) # If there is a definition that uses the $<TARGET_PROPERTY:prop> generator expression # we need to remove it since that generator expression is only valid on binary targets. # Assume that the value is 0. string(REGEX REPLACE "\\$<TARGET_PROPERTY:[^,>]+>" "0" DEFINITION "${DEFINITION}") if (${DEFINITION} MATCHES "^\\$<(.+):([^>]+)(>?)$") if("${CMAKE_MATCH_3}" STREQUAL "") set(DEFINITION "$<${CMAKE_MATCH_1}:-D${CMAKE_MATCH_2}>") set(LastGeneratorExpression "${CMAKE_MATCH_1}") else() set(DEFINITION "$<${CMAKE_MATCH_1}:-D${CMAKE_MATCH_2}>") endif() elseif(${DEFINITION} MATCHES "([^>]+)>$") # This entry is the last in a list nested within a generator expression. set(DEFINITION "$<${LastGeneratorExpression}:-D${CMAKE_MATCH_1}>") set(LastGeneratorExpression "") elseif(NOT "${LastGeneratorExpression}" STREQUAL "") set(DEFINITION "$<${LastGeneratorExpression}:-D${DEFINITION}>") else() set(DEFINITION -D${DEFINITION}) endif() list(APPEND DEFINITIONS ${DEFINITION}) endforeach() set(${DefinitionName} ${DEFINITIONS} PARENT_SCOPE) endfunction(get_compile_definitions) # Build a list of include directories function(get_include_directories IncludeDirectories) get_directory_property(dirs INCLUDE_DIRECTORIES) foreach(dir IN LISTS dirs) if (CLR_CMAKE_HOST_ARCH_ARM AND WIN32) list(APPEND INC_DIRECTORIES /I${dir}) else() list(APPEND INC_DIRECTORIES -I${dir}) endif(CLR_CMAKE_HOST_ARCH_ARM AND WIN32) endforeach() set(${IncludeDirectories} ${INC_DIRECTORIES} PARENT_SCOPE) endfunction(get_include_directories) # Build a list of include directories for consumption by the assembler function(get_include_directories_asm IncludeDirectories) get_directory_property(dirs INCLUDE_DIRECTORIES) foreach(dir IN LISTS dirs) list(APPEND INC_DIRECTORIES -I${dir};) endforeach() set(${IncludeDirectories} ${INC_DIRECTORIES} PARENT_SCOPE) endfunction(get_include_directories_asm) # Adds prefix to paths list function(addprefix var prefix list) set(f) foreach(i ${list}) set(f ${f} ${prefix}/${i}) endforeach() set(${var} ${f} PARENT_SCOPE) endfunction() # Finds and returns unwind libs function(find_unwind_libs UnwindLibs) if(CLR_CMAKE_HOST_ARCH_ARM) find_library(UNWIND_ARCH NAMES unwind-arm) endif() if(CLR_CMAKE_HOST_ARCH_ARMV6) find_library(UNWIND_ARCH NAMES unwind-arm) endif() if(CLR_CMAKE_HOST_ARCH_ARM64) find_library(UNWIND_ARCH NAMES unwind-aarch64) endif() if(CLR_CMAKE_HOST_ARCH_LOONGARCH64) find_library(UNWIND_ARCH NAMES unwind-loongarch64) endif() if(CLR_CMAKE_HOST_ARCH_AMD64) find_library(UNWIND_ARCH NAMES unwind-x86_64) endif() if(CLR_CMAKE_HOST_ARCH_S390X) find_library(UNWIND_ARCH NAMES unwind-s390x) endif() if(NOT UNWIND_ARCH STREQUAL UNWIND_ARCH-NOTFOUND) set(UNWIND_LIBS ${UNWIND_ARCH}) endif() find_library(UNWIND_GENERIC NAMES unwind-generic) if(NOT UNWIND_GENERIC STREQUAL UNWIND_GENERIC-NOTFOUND) set(UNWIND_LIBS ${UNWIND_LIBS} ${UNWIND_GENERIC}) endif() find_library(UNWIND NAMES unwind) if(UNWIND STREQUAL UNWIND-NOTFOUND) message(FATAL_ERROR "Cannot find libunwind. Try installing libunwind8-dev or libunwind-devel.") endif() set(${UnwindLibs} ${UNWIND_LIBS} ${UNWIND} PARENT_SCOPE) endfunction(find_unwind_libs) # Set the passed in RetSources variable to the list of sources with added current source directory # to form absolute paths. # The parameters after the RetSources are the input files. 
function(convert_to_absolute_path RetSources) set(Sources ${ARGN}) foreach(Source IN LISTS Sources) list(APPEND AbsolutePathSources ${CMAKE_CURRENT_SOURCE_DIR}/${Source}) endforeach() set(${RetSources} ${AbsolutePathSources} PARENT_SCOPE) endfunction(convert_to_absolute_path) #Preprocess file function(preprocess_file inputFilename outputFilename) get_compile_definitions(PREPROCESS_DEFINITIONS) get_include_directories(PREPROCESS_INCLUDE_DIRECTORIES) if (MSVC) add_custom_command( OUTPUT ${outputFilename} COMMAND ${CMAKE_CXX_COMPILER} ${PREPROCESS_INCLUDE_DIRECTORIES} /P /EP /TC ${PREPROCESS_DEFINITIONS} /Fi${outputFilename} ${inputFilename} /nologo DEPENDS ${inputFilename} COMMENT "Preprocessing ${inputFilename}. Outputting to ${outputFilename}" ) else() add_custom_command( OUTPUT ${outputFilename} COMMAND ${CMAKE_CXX_COMPILER} -E -P ${PREPROCESS_DEFINITIONS} ${PREPROCESS_INCLUDE_DIRECTORIES} -o ${outputFilename} -x c ${inputFilename} DEPENDS ${inputFilename} COMMENT "Preprocessing ${inputFilename}. Outputting to ${outputFilename}" ) endif() set_source_files_properties(${outputFilename} PROPERTIES GENERATED TRUE) endfunction() # preprocess_files(PreprocessedFilesList [fileToPreprocess1 [fileToPreprocess2 ...]]) function(preprocess_files PreprocessedFilesList) set(FilesToPreprocess ${ARGN}) foreach(ASM_FILE IN LISTS FilesToPreprocess) # Inserts a custom command in CMake build to preprocess each asm source file get_filename_component(name ${ASM_FILE} NAME_WE) file(TO_CMAKE_PATH "${CMAKE_CURRENT_BINARY_DIR}/${name}.asm" ASM_PREPROCESSED_FILE) preprocess_file(${ASM_FILE} ${ASM_PREPROCESSED_FILE}) list(APPEND PreprocessedFiles ${ASM_PREPROCESSED_FILE}) endforeach() set(${PreprocessedFilesList} ${PreprocessedFiles} PARENT_SCOPE) endfunction() function(set_exports_linker_option exports_filename) if(LD_GNU OR LD_SOLARIS OR LD_LLVM) # Add linker exports file option if(LD_SOLARIS) set(EXPORTS_LINKER_OPTION -Wl,-M,${exports_filename} PARENT_SCOPE) else() set(EXPORTS_LINKER_OPTION -Wl,--version-script=${exports_filename} PARENT_SCOPE) endif() elseif(LD_OSX) # Add linker exports file option set(EXPORTS_LINKER_OPTION -Wl,-exported_symbols_list,${exports_filename} PARENT_SCOPE) endif() endfunction() # compile_asm(TARGET target ASM_FILES file1 [file2 ...] OUTPUT_OBJECTS [variableName]) # CMake does not support the ARM or ARM64 assemblers on Windows when using the # MSBuild generator. When the MSBuild generator is in use, we manually compile the assembly files # using this function. function(compile_asm) set(options "") set(oneValueArgs TARGET OUTPUT_OBJECTS) set(multiValueArgs ASM_FILES) cmake_parse_arguments(COMPILE_ASM "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGV}) get_include_directories_asm(ASM_INCLUDE_DIRECTORIES) set (ASSEMBLED_OBJECTS "") foreach(ASM_FILE ${COMPILE_ASM_ASM_FILES}) get_filename_component(name ${ASM_FILE} NAME_WE) # Produce object file where CMake would store .obj files for an OBJECT library. 
# ex: artifacts\obj\coreclr\windows.arm64.Debug\src\vm\wks\cee_wks.dir\Debug\AsmHelpers.obj set (OBJ_FILE "${CMAKE_CURRENT_BINARY_DIR}/${COMPILE_ASM_TARGET}.dir/${CMAKE_CFG_INTDIR}/${name}.obj") # Need to compile asm file using custom command as include directories are not provided to asm compiler add_custom_command(OUTPUT ${OBJ_FILE} COMMAND "${CMAKE_ASM_COMPILER}" -g ${ASM_INCLUDE_DIRECTORIES} -o ${OBJ_FILE} ${ASM_FILE} DEPENDS ${ASM_FILE} COMMENT "Assembling ${ASM_FILE} ---> \"${CMAKE_ASM_COMPILER}\" -g ${ASM_INCLUDE_DIRECTORIES} -o ${OBJ_FILE} ${ASM_FILE}") # mark obj as source that does not require compile set_source_files_properties(${OBJ_FILE} PROPERTIES EXTERNAL_OBJECT TRUE) # Add the generated OBJ in the dependency list so that it gets consumed during linkage list(APPEND ASSEMBLED_OBJECTS ${OBJ_FILE}) endforeach() set(${COMPILE_ASM_OUTPUT_OBJECTS} ${ASSEMBLED_OBJECTS} PARENT_SCOPE) endfunction() # add_component(componentName [targetName] [EXCLUDE_FROM_ALL]) function(add_component componentName) if (${ARGC} GREATER 2 OR ${ARGC} EQUAL 2) set(componentTargetName "${ARGV1}") else() set(componentTargetName "${componentName}") endif() if (${ARGC} EQUAL 3 AND "${ARG2}" STREQUAL "EXCLUDE_FROM_ALL") set(exclude_from_all_flag "EXCLUDE_FROM_ALL") endif() get_property(definedComponents GLOBAL PROPERTY CLR_CMAKE_COMPONENTS) list (FIND definedComponents "${componentName}" componentIndex) if (${componentIndex} EQUAL -1) list (APPEND definedComponents "${componentName}") add_custom_target("${componentTargetName}" COMMAND "${CMAKE_COMMAND}" "-DCMAKE_INSTALL_COMPONENT=${componentName}" "-DBUILD_TYPE=$<CONFIG>" -P "${CMAKE_BINARY_DIR}/cmake_install.cmake" ${exclude_from_all_flag}) set_property(GLOBAL PROPERTY CLR_CMAKE_COMPONENTS ${definedComponents}) endif() endfunction() function(generate_exports_file) set(INPUT_LIST ${ARGN}) list(GET INPUT_LIST -1 outputFilename) list(REMOVE_AT INPUT_LIST -1) if(CLR_CMAKE_TARGET_OSX OR CLR_CMAKE_TARGET_MACCATALYST OR CLR_CMAKE_TARGET_IOS OR CLR_CMAKE_TARGET_TVOS) set(SCRIPT_NAME generateexportedsymbols.sh) else() set(SCRIPT_NAME generateversionscript.sh) endif() add_custom_command( OUTPUT ${outputFilename} COMMAND ${CLR_ENG_NATIVE_DIR}/${SCRIPT_NAME} ${INPUT_LIST} >${outputFilename} DEPENDS ${INPUT_LIST} ${CLR_ENG_NATIVE_DIR}/${SCRIPT_NAME} COMMENT "Generating exports file ${outputFilename}" ) set_source_files_properties(${outputFilename} PROPERTIES GENERATED TRUE) endfunction() function(generate_exports_file_prefix inputFilename outputFilename prefix) if(CMAKE_SYSTEM_NAME STREQUAL Darwin) set(SCRIPT_NAME generateexportedsymbols.sh) else() set(SCRIPT_NAME generateversionscript.sh) if (NOT ${prefix} STREQUAL "") set(EXTRA_ARGS ${prefix}) endif() endif(CMAKE_SYSTEM_NAME STREQUAL Darwin) add_custom_command( OUTPUT ${outputFilename} COMMAND ${CLR_ENG_NATIVE_DIR}/${SCRIPT_NAME} ${inputFilename} ${EXTRA_ARGS} >${outputFilename} DEPENDS ${inputFilename} ${CLR_ENG_NATIVE_DIR}/${SCRIPT_NAME} COMMENT "Generating exports file ${outputFilename}" ) set_source_files_properties(${outputFilename} PROPERTIES GENERATED TRUE) endfunction() function (get_symbol_file_name targetName outputSymbolFilename) if (CLR_CMAKE_HOST_UNIX) if (CLR_CMAKE_TARGET_OSX OR CLR_CMAKE_TARGET_MACCATALYST OR CLR_CMAKE_TARGET_IOS OR CLR_CMAKE_TARGET_TVOS) set(strip_destination_file $<TARGET_FILE:${targetName}>.dwarf) else () set(strip_destination_file $<TARGET_FILE:${targetName}>.dbg) endif () set(${outputSymbolFilename} ${strip_destination_file} PARENT_SCOPE) else(CLR_CMAKE_HOST_UNIX) # We can't use the 
$<TARGET_PDB_FILE> generator expression here since # the generator expression isn't supported on resource DLLs. set(${outputSymbolFilename} $<TARGET_FILE_DIR:${targetName}>/$<TARGET_FILE_PREFIX:${targetName}>$<TARGET_FILE_BASE_NAME:${targetName}>.pdb PARENT_SCOPE) endif(CLR_CMAKE_HOST_UNIX) endfunction() function(strip_symbols targetName outputFilename) get_symbol_file_name(${targetName} strip_destination_file) set(${outputFilename} ${strip_destination_file} PARENT_SCOPE) if (CLR_CMAKE_HOST_UNIX) set(strip_source_file $<TARGET_FILE:${targetName}>) if (CLR_CMAKE_TARGET_OSX OR CLR_CMAKE_TARGET_MACCATALYST OR CLR_CMAKE_TARGET_IOS OR CLR_CMAKE_TARGET_TVOS) # Ensure that dsymutil and strip are present find_program(DSYMUTIL dsymutil) if (DSYMUTIL STREQUAL "DSYMUTIL-NOTFOUND") message(FATAL_ERROR "dsymutil not found") endif() find_program(STRIP strip) if (STRIP STREQUAL "STRIP-NOTFOUND") message(FATAL_ERROR "strip not found") endif() string(TOLOWER "${CMAKE_BUILD_TYPE}" LOWERCASE_CMAKE_BUILD_TYPE) if (LOWERCASE_CMAKE_BUILD_TYPE STREQUAL release) set(strip_command ${STRIP} -no_code_signature_warning -S ${strip_source_file} && codesign -f -s - ${strip_source_file}) else () set(strip_command) endif () add_custom_command( TARGET ${targetName} POST_BUILD VERBATIM COMMAND ${DSYMUTIL} --flat --minimize ${strip_source_file} COMMAND ${strip_command} COMMENT "Stripping symbols from ${strip_source_file} into file ${strip_destination_file}" ) else (CLR_CMAKE_TARGET_OSX OR CLR_CMAKE_TARGET_MACCATALYST OR CLR_CMAKE_TARGET_IOS OR CLR_CMAKE_TARGET_TVOS) add_custom_command( TARGET ${targetName} POST_BUILD VERBATIM COMMAND ${CMAKE_OBJCOPY} --only-keep-debug ${strip_source_file} ${strip_destination_file} COMMAND ${CMAKE_OBJCOPY} --strip-unneeded ${strip_source_file} COMMAND ${CMAKE_OBJCOPY} --add-gnu-debuglink=${strip_destination_file} ${strip_source_file} COMMENT "Stripping symbols from ${strip_source_file} into file ${strip_destination_file}" ) endif (CLR_CMAKE_TARGET_OSX OR CLR_CMAKE_TARGET_MACCATALYST OR CLR_CMAKE_TARGET_IOS OR CLR_CMAKE_TARGET_TVOS) endif(CLR_CMAKE_HOST_UNIX) endfunction() function(install_with_stripped_symbols targetName kind destination) if(NOT CLR_CMAKE_KEEP_NATIVE_SYMBOLS) strip_symbols(${targetName} symbol_file) install_symbol_file(${symbol_file} ${destination} ${ARGN}) endif() if ((CLR_CMAKE_TARGET_OSX OR CLR_CMAKE_TARGET_MACCATALYST OR CLR_CMAKE_TARGET_IOS OR CLR_CMAKE_TARGET_TVOS) AND ("${kind}" STREQUAL "TARGETS")) # We want to avoid the kind=TARGET install behaviors which corrupt code signatures on osx-arm64 set(kind PROGRAMS) endif() if ("${kind}" STREQUAL "TARGETS") set(install_source ${targetName}) elseif("${kind}" STREQUAL "PROGRAMS") set(install_source $<TARGET_FILE:${targetName}>) else() message(FATAL_ERROR "The `kind` argument has to be either TARGETS or PROGRAMS, ${kind} was provided instead") endif() install(${kind} ${install_source} DESTINATION ${destination} ${ARGN}) endfunction() function(install_symbol_file symbol_file destination_path) if(CLR_CMAKE_TARGET_WIN32) install(FILES ${symbol_file} DESTINATION ${destination_path}/PDB ${ARGN}) else() install(FILES ${symbol_file} DESTINATION ${destination_path} ${ARGN}) endif() endfunction() function(install_static_library targetName destination component) if (NOT "${component}" STREQUAL "${targetName}") get_property(definedComponents GLOBAL PROPERTY CLR_CMAKE_COMPONENTS) list(FIND definedComponents "${component}" componentIdx) if (${componentIdx} EQUAL -1) message(FATAL_ERROR "The ${component} component is not defined. 
Add a call to `add_component(${component})` to define the component in the build.") endif() add_dependencies(${component} ${targetName}) endif() install (TARGETS ${targetName} DESTINATION ${destination} COMPONENT ${component}) if (WIN32) set_target_properties(${targetName} PROPERTIES COMPILE_PDB_NAME "${targetName}" COMPILE_PDB_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}" ) install (FILES "$<TARGET_FILE_DIR:${targetName}>/${targetName}.pdb" DESTINATION ${destination} COMPONENT ${component}) endif() endfunction() # install_clr(TARGETS targetName [targetName2 ...] [DESTINATIONS destination [destination2 ...]] [COMPONENT componentName]) function(install_clr) set(multiValueArgs TARGETS DESTINATIONS) set(singleValueArgs COMPONENT) set(options "") cmake_parse_arguments(INSTALL_CLR "${options}" "${singleValueArgs}" "${multiValueArgs}" ${ARGV}) if ("${INSTALL_CLR_TARGETS}" STREQUAL "") message(FATAL_ERROR "At least one target must be passed to install_clr(TARGETS )") endif() if ("${INSTALL_CLR_DESTINATIONS}" STREQUAL "") message(FATAL_ERROR "At least one destination must be passed to install_clr.") endif() set(destinations "") if (NOT "${INSTALL_CLR_DESTINATIONS}" STREQUAL "") list(APPEND destinations ${INSTALL_CLR_DESTINATIONS}) endif() if ("${INSTALL_CLR_COMPONENT}" STREQUAL "") set(INSTALL_CLR_COMPONENT ${CMAKE_INSTALL_DEFAULT_COMPONENT_NAME}) endif() foreach(targetName ${INSTALL_CLR_TARGETS}) if (NOT "${INSTALL_CLR_COMPONENT}" STREQUAL "${targetName}") get_property(definedComponents GLOBAL PROPERTY CLR_CMAKE_COMPONENTS) list(FIND definedComponents "${INSTALL_CLR_COMPONENT}" componentIdx) if (${componentIdx} EQUAL -1) message(FATAL_ERROR "The ${INSTALL_CLR_COMPONENT} component is not defined. Add a call to `add_component(${INSTALL_CLR_COMPONENT})` to define the component in the build.") endif() add_dependencies(${INSTALL_CLR_COMPONENT} ${targetName}) endif() get_target_property(targetType ${targetName} TYPE) if (NOT CLR_CMAKE_KEEP_NATIVE_SYMBOLS AND NOT "${targetType}" STREQUAL "STATIC_LIBRARY") get_symbol_file_name(${targetName} symbolFile) endif() foreach(destination ${destinations}) # We don't need to install the export libraries for our DLLs # since they won't be directly linked against. install(PROGRAMS $<TARGET_FILE:${targetName}> DESTINATION ${destination} COMPONENT ${INSTALL_CLR_COMPONENT}) if (NOT "${symbolFile}" STREQUAL "") install_symbol_file(${symbolFile} ${destination} COMPONENT ${INSTALL_CLR_COMPONENT}) endif() if(CLR_CMAKE_PGO_INSTRUMENT) if(WIN32) get_property(is_multi_config GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG) if(is_multi_config) install(FILES ${CMAKE_CURRENT_BINARY_DIR}/$<CONFIG>/${targetName}.pgd DESTINATION ${destination}/PGD OPTIONAL COMPONENT ${INSTALL_CLR_COMPONENT}) else() install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${targetName}.pgd DESTINATION ${destination}/PGD OPTIONAL COMPONENT ${INSTALL_CLR_COMPONENT}) endif() endif() endif() endforeach() endforeach() endfunction() # Disable PAX mprotect that would prevent JIT and other codegen in coreclr from working. # PAX mprotect prevents: # - changing the executable status of memory pages that were # not originally created as executable, # - making read-only executable pages writable again, # - creating executable pages from anonymous memory, # - making read-only-after-relocations (RELRO) data pages writable again. function(disable_pax_mprotect targetName) # Disabling PAX hardening only makes sense in systems that use Elf image formats. 
Particularly, looking # for paxctl in macOS is problematic as it collides with popular software for that OS that performs completely # unrelated functionality. Only look for it when we'll generate Elf images. if (CLR_CMAKE_HOST_LINUX OR CLR_CMAKE_HOST_FREEBSD OR CLR_CMAKE_HOST_NETBSD OR CLR_CMAKE_HOST_SUNOS) # Try to locate the paxctl tool. Failure to find it is not fatal, # but the generated executables won't work on a system where PAX is set # to prevent applications to create executable memory mappings. find_program(PAXCTL paxctl) if (NOT PAXCTL STREQUAL "PAXCTL-NOTFOUND") add_custom_command( TARGET ${targetName} POST_BUILD VERBATIM COMMAND ${PAXCTL} -c -m $<TARGET_FILE:${targetName}> ) endif() endif(CLR_CMAKE_HOST_LINUX OR CLR_CMAKE_HOST_FREEBSD OR CLR_CMAKE_HOST_NETBSD OR CLR_CMAKE_HOST_SUNOS) endfunction() if (CMAKE_VERSION VERSION_LESS "3.12") # Polyfill add_compile_definitions when it is unavailable function(add_compile_definitions) get_directory_property(DIR_COMPILE_DEFINITIONS COMPILE_DEFINITIONS) list(APPEND DIR_COMPILE_DEFINITIONS ${ARGV}) set_directory_properties(PROPERTIES COMPILE_DEFINITIONS "${DIR_COMPILE_DEFINITIONS}") endfunction() endif() if (CMAKE_VERSION VERSION_LESS "3.16") # Provide a no-op polyfill for precompiled headers on old CMake versions function(target_precompile_headers) endfunction() endif() # add_linker_flag(Flag [Config1 Config2 ...]) function(add_linker_flag Flag) if (ARGN STREQUAL "") set("CMAKE_EXE_LINKER_FLAGS" "${CMAKE_EXE_LINKER_FLAGS} ${Flag}" PARENT_SCOPE) set("CMAKE_SHARED_LINKER_FLAGS" "${CMAKE_SHARED_LINKER_FLAGS} ${Flag}" PARENT_SCOPE) else() foreach(Config ${ARGN}) set("CMAKE_EXE_LINKER_FLAGS_${Config}" "${CMAKE_EXE_LINKER_FLAGS_${Config}} ${Flag}" PARENT_SCOPE) set("CMAKE_SHARED_LINKER_FLAGS_${Config}" "${CMAKE_SHARED_LINKER_FLAGS_${Config}} ${Flag}" PARENT_SCOPE) endforeach() endif() endfunction() function(link_natvis_sources_for_target targetName linkKind) if (NOT CLR_CMAKE_HOST_WIN32) return() endif() foreach(source ${ARGN}) if (NOT IS_ABSOLUTE "${source}") convert_to_absolute_path(source ${source}) endif() get_filename_component(extension "${source}" EXT) if ("${extension}" STREQUAL ".natvis") message("Embedding natvis ${source}") # Since natvis embedding is only supported on Windows # we can use target_link_options since our minimum version is high enough target_link_options(${targetName} "${linkKind}" "-NATVIS:${source}") endif() endforeach() endfunction() function(add_executable_clr targetName) if(NOT WIN32) add_executable(${ARGV} ${VERSION_FILE_PATH}) disable_pax_mprotect(${ARGV}) else() add_executable(${ARGV}) endif(NOT WIN32) if(NOT CLR_CMAKE_KEEP_NATIVE_SYMBOLS) strip_symbols(${ARGV0} symbolFile) endif() endfunction() function(add_library_clr targetName kind) if(NOT WIN32 AND "${kind}" STREQUAL "SHARED") add_library(${ARGV} ${VERSION_FILE_PATH}) else() add_library(${ARGV}) endif() if("${kind}" STREQUAL "SHARED" AND NOT CLR_CMAKE_KEEP_NATIVE_SYMBOLS) strip_symbols(${ARGV0} symbolFile) endif() endfunction()
function(clr_unknown_arch) if (WIN32) message(FATAL_ERROR "Only AMD64, ARM64, ARM and I386 are supported. Found: ${CMAKE_SYSTEM_PROCESSOR}") elseif(CLR_CROSS_COMPONENTS_BUILD) message(FATAL_ERROR "Only AMD64, I386 host are supported for linux cross-architecture component. Found: ${CMAKE_SYSTEM_PROCESSOR}") else() message(FATAL_ERROR "Only AMD64, ARMV6, ARM64, LOONGARCH64 and ARM are supported. Found: ${CMAKE_SYSTEM_PROCESSOR}") endif() endfunction() # C to MASM include file translator # This is replacement for the deprecated h2inc tool that used to be part of VS. function(h2inc filename output) file(STRINGS ${filename} lines) get_filename_component(path "${filename}" DIRECTORY) file(RELATIVE_PATH relative_filename "${CLR_REPO_ROOT_DIR}" "${filename}") file(WRITE "${output}" "// File start: ${relative_filename}\n") # Use of NEWLINE_CONSUME is needed for lines with trailing backslash file(STRINGS ${filename} contents NEWLINE_CONSUME) string(REGEX REPLACE "\\\\\n" "\\\\\\\\ \n" contents "${contents}") string(REGEX REPLACE "\n" ";" lines "${contents}") foreach(line IN LISTS lines) string(REGEX REPLACE "\\\\\\\\ " "\\\\" line "${line}") if(line MATCHES "^ *# pragma") # Ignore pragmas continue() endif() if(line MATCHES "^ *# *include *\"(.*)\"") # Expand includes. h2inc("${path}/${CMAKE_MATCH_1}" "${output}") continue() endif() if(line MATCHES "^ *#define +([0-9A-Za-z_()]+) *(.*)") # Augment #defines with their MASM equivalent set(name "${CMAKE_MATCH_1}") set(value "${CMAKE_MATCH_2}") # Note that we do not handle multiline constants # Strip comments from value string(REGEX REPLACE "//.*" "" value "${value}") string(REGEX REPLACE "/\\*.*\\*/" "" value "${value}") # Strip whitespaces from value string(REPLACE " +$" "" value "${value}") # ignore #defines with arguments if(NOT "${name}" MATCHES "\\(") set(HEX_NUMBER_PATTERN "0x([0-9A-Fa-f]+)") set(DECIMAL_NUMBER_PATTERN "(-?[0-9]+)") if("${value}" MATCHES "${HEX_NUMBER_PATTERN}") string(REGEX REPLACE "${HEX_NUMBER_PATTERN}" "0\\1h" value "${value}") # Convert hex constants file(APPEND "${output}" "${name} EQU ${value}\n") elseif("${value}" MATCHES "${DECIMAL_NUMBER_PATTERN}" AND (NOT "${value}" MATCHES "[G-Zg-z]+" OR "${value}" MATCHES "\\(")) string(REGEX REPLACE "${DECIMAL_NUMBER_PATTERN}" "\\1t" value "${value}") # Convert dec constants file(APPEND "${output}" "${name} EQU ${value}\n") else() file(APPEND "${output}" "${name} TEXTEQU <${value}>\n") endif() endif() endif() file(APPEND "${output}" "${line}\n") endforeach() file(APPEND "${output}" "// File end: ${relative_filename}\n") endfunction() # Build a list of compiler definitions by putting -D in front of each define. function(get_compile_definitions DefinitionName) # Get the current list of definitions get_directory_property(COMPILE_DEFINITIONS_LIST COMPILE_DEFINITIONS) # The entries that contain generator expressions must have the -D inside of the # expression. So we transform e.g. $<$<CONFIG:Debug>:_DEBUG> to $<$<CONFIG:Debug>:-D_DEBUG> # CMake's support for multiple values within a single generator expression is somewhat ad-hoc. # Since we have a number of complex generator expressions, we use them with multiple values to ensure that # we don't forget to update all of the generator expressions if one needs to be updated. # As a result, we need to expand out the multi-valued generator expression to wrap each individual value here. # Otherwise, CMake will fail to expand it. 
set(LastGeneratorExpression "") foreach(DEFINITION IN LISTS COMPILE_DEFINITIONS_LIST) # If there is a definition that uses the $<TARGET_PROPERTY:prop> generator expression # we need to remove it since that generator expression is only valid on binary targets. # Assume that the value is 0. string(REGEX REPLACE "\\$<TARGET_PROPERTY:[^,>]+>" "0" DEFINITION "${DEFINITION}") if (${DEFINITION} MATCHES "^\\$<(.+):([^>]+)(>?)$") if("${CMAKE_MATCH_3}" STREQUAL "") set(DEFINITION "$<${CMAKE_MATCH_1}:-D${CMAKE_MATCH_2}>") set(LastGeneratorExpression "${CMAKE_MATCH_1}") else() set(DEFINITION "$<${CMAKE_MATCH_1}:-D${CMAKE_MATCH_2}>") endif() elseif(${DEFINITION} MATCHES "([^>]+)>$") # This entry is the last in a list nested within a generator expression. set(DEFINITION "$<${LastGeneratorExpression}:-D${CMAKE_MATCH_1}>") set(LastGeneratorExpression "") elseif(NOT "${LastGeneratorExpression}" STREQUAL "") set(DEFINITION "$<${LastGeneratorExpression}:-D${DEFINITION}>") else() set(DEFINITION -D${DEFINITION}) endif() list(APPEND DEFINITIONS ${DEFINITION}) endforeach() set(${DefinitionName} ${DEFINITIONS} PARENT_SCOPE) endfunction(get_compile_definitions) # Build a list of include directories function(get_include_directories IncludeDirectories) get_directory_property(dirs INCLUDE_DIRECTORIES) foreach(dir IN LISTS dirs) if (CLR_CMAKE_HOST_ARCH_ARM AND WIN32) list(APPEND INC_DIRECTORIES /I${dir}) else() list(APPEND INC_DIRECTORIES -I${dir}) endif(CLR_CMAKE_HOST_ARCH_ARM AND WIN32) endforeach() set(${IncludeDirectories} ${INC_DIRECTORIES} PARENT_SCOPE) endfunction(get_include_directories) # Build a list of include directories for consumption by the assembler function(get_include_directories_asm IncludeDirectories) get_directory_property(dirs INCLUDE_DIRECTORIES) foreach(dir IN LISTS dirs) list(APPEND INC_DIRECTORIES -I${dir};) endforeach() set(${IncludeDirectories} ${INC_DIRECTORIES} PARENT_SCOPE) endfunction(get_include_directories_asm) # Adds prefix to paths list function(addprefix var prefix list) set(f) foreach(i ${list}) set(f ${f} ${prefix}/${i}) endforeach() set(${var} ${f} PARENT_SCOPE) endfunction() # Finds and returns unwind libs function(find_unwind_libs UnwindLibs) if(CLR_CMAKE_HOST_ARCH_ARM) find_library(UNWIND_ARCH NAMES unwind-arm) endif() if(CLR_CMAKE_HOST_ARCH_ARMV6) find_library(UNWIND_ARCH NAMES unwind-arm) endif() if(CLR_CMAKE_HOST_ARCH_ARM64) find_library(UNWIND_ARCH NAMES unwind-aarch64) endif() if(CLR_CMAKE_HOST_ARCH_LOONGARCH64) find_library(UNWIND_ARCH NAMES unwind-loongarch64) endif() if(CLR_CMAKE_HOST_ARCH_AMD64) find_library(UNWIND_ARCH NAMES unwind-x86_64) endif() if(CLR_CMAKE_HOST_ARCH_S390X) find_library(UNWIND_ARCH NAMES unwind-s390x) endif() if(NOT UNWIND_ARCH STREQUAL UNWIND_ARCH-NOTFOUND) set(UNWIND_LIBS ${UNWIND_ARCH}) endif() find_library(UNWIND_GENERIC NAMES unwind-generic) if(NOT UNWIND_GENERIC STREQUAL UNWIND_GENERIC-NOTFOUND) set(UNWIND_LIBS ${UNWIND_LIBS} ${UNWIND_GENERIC}) endif() find_library(UNWIND NAMES unwind) if(UNWIND STREQUAL UNWIND-NOTFOUND) message(FATAL_ERROR "Cannot find libunwind. Try installing libunwind8-dev or libunwind-devel.") endif() set(${UnwindLibs} ${UNWIND_LIBS} ${UNWIND} PARENT_SCOPE) endfunction(find_unwind_libs) # Set the passed in RetSources variable to the list of sources with added current source directory # to form absolute paths. # The parameters after the RetSources are the input files. 
function(convert_to_absolute_path RetSources) set(Sources ${ARGN}) foreach(Source IN LISTS Sources) list(APPEND AbsolutePathSources ${CMAKE_CURRENT_SOURCE_DIR}/${Source}) endforeach() set(${RetSources} ${AbsolutePathSources} PARENT_SCOPE) endfunction(convert_to_absolute_path) #Preprocess file function(preprocess_file inputFilename outputFilename) get_compile_definitions(PREPROCESS_DEFINITIONS) get_include_directories(PREPROCESS_INCLUDE_DIRECTORIES) if (MSVC) add_custom_command( OUTPUT ${outputFilename} COMMAND ${CMAKE_CXX_COMPILER} ${PREPROCESS_INCLUDE_DIRECTORIES} /P /EP /TC ${PREPROCESS_DEFINITIONS} /Fi${outputFilename} ${inputFilename} /nologo DEPENDS ${inputFilename} COMMENT "Preprocessing ${inputFilename}. Outputting to ${outputFilename}" ) else() add_custom_command( OUTPUT ${outputFilename} COMMAND ${CMAKE_CXX_COMPILER} -E -P ${PREPROCESS_DEFINITIONS} ${PREPROCESS_INCLUDE_DIRECTORIES} -o ${outputFilename} -x c ${inputFilename} DEPENDS ${inputFilename} COMMENT "Preprocessing ${inputFilename}. Outputting to ${outputFilename}" ) endif() set_source_files_properties(${outputFilename} PROPERTIES GENERATED TRUE) endfunction() # preprocess_files(PreprocessedFilesList [fileToPreprocess1 [fileToPreprocess2 ...]]) function(preprocess_files PreprocessedFilesList) set(FilesToPreprocess ${ARGN}) foreach(ASM_FILE IN LISTS FilesToPreprocess) # Inserts a custom command in CMake build to preprocess each asm source file get_filename_component(name ${ASM_FILE} NAME_WE) file(TO_CMAKE_PATH "${CMAKE_CURRENT_BINARY_DIR}/${name}.asm" ASM_PREPROCESSED_FILE) preprocess_file(${ASM_FILE} ${ASM_PREPROCESSED_FILE}) list(APPEND PreprocessedFiles ${ASM_PREPROCESSED_FILE}) endforeach() set(${PreprocessedFilesList} ${PreprocessedFiles} PARENT_SCOPE) endfunction() function(set_exports_linker_option exports_filename) if(LD_GNU OR LD_SOLARIS OR LD_LLVM) # Add linker exports file option if(LD_SOLARIS) set(EXPORTS_LINKER_OPTION -Wl,-M,${exports_filename} PARENT_SCOPE) else() set(EXPORTS_LINKER_OPTION -Wl,--version-script=${exports_filename} PARENT_SCOPE) endif() elseif(LD_OSX) # Add linker exports file option set(EXPORTS_LINKER_OPTION -Wl,-exported_symbols_list,${exports_filename} PARENT_SCOPE) endif() endfunction() # compile_asm(TARGET target ASM_FILES file1 [file2 ...] OUTPUT_OBJECTS [variableName]) # CMake does not support the ARM or ARM64 assemblers on Windows when using the # MSBuild generator. When the MSBuild generator is in use, we manually compile the assembly files # using this function. function(compile_asm) set(options "") set(oneValueArgs TARGET OUTPUT_OBJECTS) set(multiValueArgs ASM_FILES) cmake_parse_arguments(COMPILE_ASM "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGV}) get_include_directories_asm(ASM_INCLUDE_DIRECTORIES) set (ASSEMBLED_OBJECTS "") foreach(ASM_FILE ${COMPILE_ASM_ASM_FILES}) get_filename_component(name ${ASM_FILE} NAME_WE) # Produce object file where CMake would store .obj files for an OBJECT library. 
# ex: artifacts\obj\coreclr\windows.arm64.Debug\src\vm\wks\cee_wks.dir\Debug\AsmHelpers.obj set (OBJ_FILE "${CMAKE_CURRENT_BINARY_DIR}/${COMPILE_ASM_TARGET}.dir/${CMAKE_CFG_INTDIR}/${name}.obj") # Need to compile asm file using custom command as include directories are not provided to asm compiler add_custom_command(OUTPUT ${OBJ_FILE} COMMAND "${CMAKE_ASM_COMPILER}" -g ${ASM_INCLUDE_DIRECTORIES} -o ${OBJ_FILE} ${ASM_FILE} DEPENDS ${ASM_FILE} COMMENT "Assembling ${ASM_FILE} ---> \"${CMAKE_ASM_COMPILER}\" -g ${ASM_INCLUDE_DIRECTORIES} -o ${OBJ_FILE} ${ASM_FILE}") # mark obj as source that does not require compile set_source_files_properties(${OBJ_FILE} PROPERTIES EXTERNAL_OBJECT TRUE) # Add the generated OBJ in the dependency list so that it gets consumed during linkage list(APPEND ASSEMBLED_OBJECTS ${OBJ_FILE}) endforeach() set(${COMPILE_ASM_OUTPUT_OBJECTS} ${ASSEMBLED_OBJECTS} PARENT_SCOPE) endfunction() # add_component(componentName [targetName] [EXCLUDE_FROM_ALL]) function(add_component componentName) if (${ARGC} GREATER 2 OR ${ARGC} EQUAL 2) set(componentTargetName "${ARGV1}") else() set(componentTargetName "${componentName}") endif() if (${ARGC} EQUAL 3 AND "${ARG2}" STREQUAL "EXCLUDE_FROM_ALL") set(exclude_from_all_flag "EXCLUDE_FROM_ALL") endif() get_property(definedComponents GLOBAL PROPERTY CLR_CMAKE_COMPONENTS) list (FIND definedComponents "${componentName}" componentIndex) if (${componentIndex} EQUAL -1) list (APPEND definedComponents "${componentName}") add_custom_target("${componentTargetName}" COMMAND "${CMAKE_COMMAND}" "-DCMAKE_INSTALL_COMPONENT=${componentName}" "-DBUILD_TYPE=$<CONFIG>" -P "${CMAKE_BINARY_DIR}/cmake_install.cmake" ${exclude_from_all_flag}) set_property(GLOBAL PROPERTY CLR_CMAKE_COMPONENTS ${definedComponents}) endif() endfunction() function(generate_exports_file) set(INPUT_LIST ${ARGN}) list(GET INPUT_LIST -1 outputFilename) list(REMOVE_AT INPUT_LIST -1) if(CLR_CMAKE_TARGET_OSX OR CLR_CMAKE_TARGET_MACCATALYST OR CLR_CMAKE_TARGET_IOS OR CLR_CMAKE_TARGET_TVOS) set(SCRIPT_NAME generateexportedsymbols.sh) else() set(SCRIPT_NAME generateversionscript.sh) endif() add_custom_command( OUTPUT ${outputFilename} COMMAND ${CLR_ENG_NATIVE_DIR}/${SCRIPT_NAME} ${INPUT_LIST} >${outputFilename} DEPENDS ${INPUT_LIST} ${CLR_ENG_NATIVE_DIR}/${SCRIPT_NAME} COMMENT "Generating exports file ${outputFilename}" ) set_source_files_properties(${outputFilename} PROPERTIES GENERATED TRUE) endfunction() function(generate_exports_file_prefix inputFilename outputFilename prefix) if(CMAKE_SYSTEM_NAME STREQUAL Darwin) set(SCRIPT_NAME generateexportedsymbols.sh) else() set(SCRIPT_NAME generateversionscript.sh) if (NOT ${prefix} STREQUAL "") set(EXTRA_ARGS ${prefix}) endif() endif(CMAKE_SYSTEM_NAME STREQUAL Darwin) add_custom_command( OUTPUT ${outputFilename} COMMAND ${CLR_ENG_NATIVE_DIR}/${SCRIPT_NAME} ${inputFilename} ${EXTRA_ARGS} >${outputFilename} DEPENDS ${inputFilename} ${CLR_ENG_NATIVE_DIR}/${SCRIPT_NAME} COMMENT "Generating exports file ${outputFilename}" ) set_source_files_properties(${outputFilename} PROPERTIES GENERATED TRUE) endfunction() function (get_symbol_file_name targetName outputSymbolFilename) if (CLR_CMAKE_HOST_UNIX) if (CLR_CMAKE_TARGET_OSX OR CLR_CMAKE_TARGET_MACCATALYST OR CLR_CMAKE_TARGET_IOS OR CLR_CMAKE_TARGET_TVOS) set(strip_destination_file $<TARGET_FILE:${targetName}>.dwarf) else () set(strip_destination_file $<TARGET_FILE:${targetName}>.dbg) endif () set(${outputSymbolFilename} ${strip_destination_file} PARENT_SCOPE) else(CLR_CMAKE_HOST_UNIX) # We can't use the 
$<TARGET_PDB_FILE> generator expression here since # the generator expression isn't supported on resource DLLs. set(${outputSymbolFilename} $<TARGET_FILE_DIR:${targetName}>/$<TARGET_FILE_PREFIX:${targetName}>$<TARGET_FILE_BASE_NAME:${targetName}>.pdb PARENT_SCOPE) endif(CLR_CMAKE_HOST_UNIX) endfunction() function(strip_symbols targetName outputFilename) get_symbol_file_name(${targetName} strip_destination_file) set(${outputFilename} ${strip_destination_file} PARENT_SCOPE) if (CLR_CMAKE_HOST_UNIX) set(strip_source_file $<TARGET_FILE:${targetName}>) if (CLR_CMAKE_TARGET_OSX OR CLR_CMAKE_TARGET_MACCATALYST OR CLR_CMAKE_TARGET_IOS OR CLR_CMAKE_TARGET_TVOS) # Ensure that dsymutil and strip are present find_program(DSYMUTIL dsymutil) if (DSYMUTIL STREQUAL "DSYMUTIL-NOTFOUND") message(FATAL_ERROR "dsymutil not found") endif() find_program(STRIP strip) if (STRIP STREQUAL "STRIP-NOTFOUND") message(FATAL_ERROR "strip not found") endif() string(TOLOWER "${CMAKE_BUILD_TYPE}" LOWERCASE_CMAKE_BUILD_TYPE) if (LOWERCASE_CMAKE_BUILD_TYPE STREQUAL release) set(strip_command ${STRIP} -no_code_signature_warning -S ${strip_source_file} && codesign -f -s - ${strip_source_file}) else () set(strip_command) endif () add_custom_command( TARGET ${targetName} POST_BUILD VERBATIM COMMAND ${DSYMUTIL} --flat --minimize ${strip_source_file} COMMAND ${strip_command} COMMENT "Stripping symbols from ${strip_source_file} into file ${strip_destination_file}" ) else (CLR_CMAKE_TARGET_OSX OR CLR_CMAKE_TARGET_MACCATALYST OR CLR_CMAKE_TARGET_IOS OR CLR_CMAKE_TARGET_TVOS) add_custom_command( TARGET ${targetName} POST_BUILD VERBATIM COMMAND ${CMAKE_OBJCOPY} --only-keep-debug ${strip_source_file} ${strip_destination_file} COMMAND ${CMAKE_OBJCOPY} --strip-unneeded ${strip_source_file} COMMAND ${CMAKE_OBJCOPY} --add-gnu-debuglink=${strip_destination_file} ${strip_source_file} COMMENT "Stripping symbols from ${strip_source_file} into file ${strip_destination_file}" ) endif (CLR_CMAKE_TARGET_OSX OR CLR_CMAKE_TARGET_MACCATALYST OR CLR_CMAKE_TARGET_IOS OR CLR_CMAKE_TARGET_TVOS) endif(CLR_CMAKE_HOST_UNIX) endfunction() function(install_with_stripped_symbols targetName kind destination) if(NOT CLR_CMAKE_KEEP_NATIVE_SYMBOLS) strip_symbols(${targetName} symbol_file) install_symbol_file(${symbol_file} ${destination} ${ARGN}) endif() if ((CLR_CMAKE_TARGET_OSX OR CLR_CMAKE_TARGET_MACCATALYST OR CLR_CMAKE_TARGET_IOS OR CLR_CMAKE_TARGET_TVOS) AND ("${kind}" STREQUAL "TARGETS")) # We want to avoid the kind=TARGET install behaviors which corrupt code signatures on osx-arm64 set(kind PROGRAMS) endif() if ("${kind}" STREQUAL "TARGETS") set(install_source ${targetName}) elseif("${kind}" STREQUAL "PROGRAMS") set(install_source $<TARGET_FILE:${targetName}>) else() message(FATAL_ERROR "The `kind` argument has to be either TARGETS or PROGRAMS, ${kind} was provided instead") endif() install(${kind} ${install_source} DESTINATION ${destination} ${ARGN}) endfunction() function(install_symbol_file symbol_file destination_path) if(CLR_CMAKE_TARGET_WIN32) install(FILES ${symbol_file} DESTINATION ${destination_path}/PDB ${ARGN}) else() install(FILES ${symbol_file} DESTINATION ${destination_path} ${ARGN}) endif() endfunction() function(install_static_library targetName destination component) if (NOT "${component}" STREQUAL "${targetName}") get_property(definedComponents GLOBAL PROPERTY CLR_CMAKE_COMPONENTS) list(FIND definedComponents "${component}" componentIdx) if (${componentIdx} EQUAL -1) message(FATAL_ERROR "The ${component} component is not defined. 
Add a call to `add_component(${component})` to define the component in the build.") endif() add_dependencies(${component} ${targetName}) endif() install (TARGETS ${targetName} DESTINATION ${destination} COMPONENT ${component}) if (WIN32) set_target_properties(${targetName} PROPERTIES COMPILE_PDB_NAME "${targetName}" COMPILE_PDB_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}" ) install (FILES "$<TARGET_FILE_DIR:${targetName}>/${targetName}.pdb" DESTINATION ${destination} COMPONENT ${component}) endif() endfunction() # install_clr(TARGETS targetName [targetName2 ...] [DESTINATIONS destination [destination2 ...]] [COMPONENT componentName]) function(install_clr) set(multiValueArgs TARGETS DESTINATIONS) set(singleValueArgs COMPONENT) set(options "") cmake_parse_arguments(INSTALL_CLR "${options}" "${singleValueArgs}" "${multiValueArgs}" ${ARGV}) if ("${INSTALL_CLR_TARGETS}" STREQUAL "") message(FATAL_ERROR "At least one target must be passed to install_clr(TARGETS )") endif() if ("${INSTALL_CLR_DESTINATIONS}" STREQUAL "") message(FATAL_ERROR "At least one destination must be passed to install_clr.") endif() set(destinations "") if (NOT "${INSTALL_CLR_DESTINATIONS}" STREQUAL "") list(APPEND destinations ${INSTALL_CLR_DESTINATIONS}) endif() if ("${INSTALL_CLR_COMPONENT}" STREQUAL "") set(INSTALL_CLR_COMPONENT ${CMAKE_INSTALL_DEFAULT_COMPONENT_NAME}) endif() foreach(targetName ${INSTALL_CLR_TARGETS}) if (NOT "${INSTALL_CLR_COMPONENT}" STREQUAL "${targetName}") get_property(definedComponents GLOBAL PROPERTY CLR_CMAKE_COMPONENTS) list(FIND definedComponents "${INSTALL_CLR_COMPONENT}" componentIdx) if (${componentIdx} EQUAL -1) message(FATAL_ERROR "The ${INSTALL_CLR_COMPONENT} component is not defined. Add a call to `add_component(${INSTALL_CLR_COMPONENT})` to define the component in the build.") endif() add_dependencies(${INSTALL_CLR_COMPONENT} ${targetName}) endif() get_target_property(targetType ${targetName} TYPE) if (NOT CLR_CMAKE_KEEP_NATIVE_SYMBOLS AND NOT "${targetType}" STREQUAL "STATIC_LIBRARY") get_symbol_file_name(${targetName} symbolFile) endif() foreach(destination ${destinations}) # We don't need to install the export libraries for our DLLs # since they won't be directly linked against. install(PROGRAMS $<TARGET_FILE:${targetName}> DESTINATION ${destination} COMPONENT ${INSTALL_CLR_COMPONENT}) if (NOT "${symbolFile}" STREQUAL "") install_symbol_file(${symbolFile} ${destination} COMPONENT ${INSTALL_CLR_COMPONENT}) endif() if(CLR_CMAKE_PGO_INSTRUMENT) if(WIN32) get_property(is_multi_config GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG) if(is_multi_config) install(FILES ${CMAKE_CURRENT_BINARY_DIR}/$<CONFIG>/${targetName}.pgd DESTINATION ${destination}/PGD OPTIONAL COMPONENT ${INSTALL_CLR_COMPONENT}) else() install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${targetName}.pgd DESTINATION ${destination}/PGD OPTIONAL COMPONENT ${INSTALL_CLR_COMPONENT}) endif() endif() endif() endforeach() endforeach() endfunction() # Disable PAX mprotect that would prevent JIT and other codegen in coreclr from working. # PAX mprotect prevents: # - changing the executable status of memory pages that were # not originally created as executable, # - making read-only executable pages writable again, # - creating executable pages from anonymous memory, # - making read-only-after-relocations (RELRO) data pages writable again. function(disable_pax_mprotect targetName) # Disabling PAX hardening only makes sense in systems that use Elf image formats. 
Particularly, looking # for paxctl in macOS is problematic as it collides with popular software for that OS that performs completely # unrelated functionality. Only look for it when we'll generate Elf images. if (CLR_CMAKE_HOST_LINUX OR CLR_CMAKE_HOST_FREEBSD OR CLR_CMAKE_HOST_NETBSD OR CLR_CMAKE_HOST_SUNOS) # Try to locate the paxctl tool. Failure to find it is not fatal, # but the generated executables won't work on a system where PAX is set # to prevent applications to create executable memory mappings. find_program(PAXCTL paxctl) if (NOT PAXCTL STREQUAL "PAXCTL-NOTFOUND") add_custom_command( TARGET ${targetName} POST_BUILD VERBATIM COMMAND ${PAXCTL} -c -m $<TARGET_FILE:${targetName}> ) endif() endif(CLR_CMAKE_HOST_LINUX OR CLR_CMAKE_HOST_FREEBSD OR CLR_CMAKE_HOST_NETBSD OR CLR_CMAKE_HOST_SUNOS) endfunction() if (CMAKE_VERSION VERSION_LESS "3.12") # Polyfill add_compile_definitions when it is unavailable function(add_compile_definitions) get_directory_property(DIR_COMPILE_DEFINITIONS COMPILE_DEFINITIONS) list(APPEND DIR_COMPILE_DEFINITIONS ${ARGV}) set_directory_properties(PROPERTIES COMPILE_DEFINITIONS "${DIR_COMPILE_DEFINITIONS}") endfunction() endif() if (CMAKE_VERSION VERSION_LESS "3.16") # Provide a no-op polyfill for precompiled headers on old CMake versions function(target_precompile_headers) endfunction() endif() # add_linker_flag(Flag [Config1 Config2 ...]) function(add_linker_flag Flag) if (ARGN STREQUAL "") set("CMAKE_EXE_LINKER_FLAGS" "${CMAKE_EXE_LINKER_FLAGS} ${Flag}" PARENT_SCOPE) set("CMAKE_SHARED_LINKER_FLAGS" "${CMAKE_SHARED_LINKER_FLAGS} ${Flag}" PARENT_SCOPE) else() foreach(Config ${ARGN}) set("CMAKE_EXE_LINKER_FLAGS_${Config}" "${CMAKE_EXE_LINKER_FLAGS_${Config}} ${Flag}" PARENT_SCOPE) set("CMAKE_SHARED_LINKER_FLAGS_${Config}" "${CMAKE_SHARED_LINKER_FLAGS_${Config}} ${Flag}" PARENT_SCOPE) endforeach() endif() endfunction() function(link_natvis_sources_for_target targetName linkKind) if (NOT CLR_CMAKE_HOST_WIN32) return() endif() foreach(source ${ARGN}) if (NOT IS_ABSOLUTE "${source}") convert_to_absolute_path(source ${source}) endif() get_filename_component(extension "${source}" EXT) if ("${extension}" STREQUAL ".natvis") message("Embedding natvis ${source}") # Since natvis embedding is only supported on Windows # we can use target_link_options since our minimum version is high enough target_link_options(${targetName} "${linkKind}" "-NATVIS:${source}") endif() endforeach() endfunction() function(add_executable_clr targetName) if(NOT WIN32) add_executable(${ARGV} ${VERSION_FILE_PATH}) disable_pax_mprotect(${ARGV}) else() add_executable(${ARGV}) endif(NOT WIN32) if(NOT CLR_CMAKE_KEEP_NATIVE_SYMBOLS) strip_symbols(${ARGV0} symbolFile) endif() endfunction() function(add_library_clr targetName kind) if(NOT WIN32 AND "${kind}" STREQUAL "SHARED") add_library(${ARGV} ${VERSION_FILE_PATH}) else() add_library(${ARGV}) endif() if("${kind}" STREQUAL "SHARED" AND NOT CLR_CMAKE_KEEP_NATIVE_SYMBOLS) strip_symbols(${ARGV0} symbolFile) endif() endfunction()
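Several of the helpers in the functions.cmake content above just wrap ordinary toolchain commands. As a rough orientation aid (not taken from any real build log), the custom command that `preprocess_file()` registers on non-MSVC hosts boils down to a shell invocation like the one below; the compiler name, defines, include path, and file names are all placeholders, not values from the repository.

```sh
# Approximation of the non-MSVC add_custom_command emitted by preprocess_file();
# compiler, -D defines, -I path, and file names are illustrative placeholders.
clang -E -P -DTARGET_ARM -DTARGET_UNIX -I src/pal/inc \
      -o build/asmconstants.inc -x c src/vm/arm/asmconstants.h
```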
-1
dotnet/runtime
66,477
Add armv8l virtualized 32-bit ARM core detection
## Expected behavior Runtime should be able to build within 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, runtime does not know to parse as `arm` when output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies existing logics to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812) Arm support on source-build at https://github.com/dotnet/installer/pull/13378 Made as part of Alpine Linux dotnet6 packaging project, see dotnet/source-build#2782
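The description above only names the desired behaviour; as a hedged sketch (not the PR's actual diff, which touches the repo's init scripts and CMake configuration), the mapping it asks for looks roughly like the shell fragment below, where `armv8l` is routed to the same `arm` architecture name as `armv7l`. The variable names and the exit-on-unknown behaviour are assumptions for illustration.

```sh
# Illustrative uname-based architecture detection; names are assumptions.
CPUName=$(uname -m)
case "$CPUName" in
    armv7l|armv8l)  buildArch=arm ;;     # armv8l: 32-bit userspace on an ARMv8 core
    aarch64|arm64)  buildArch=arm64 ;;
    x86_64)         buildArch=x64 ;;
    *)              echo "Unknown CPU: $CPUName" >&2; exit 1 ;;
esac
echo "$buildArch"
```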
ayakael
2022-03-10T21:05:11Z
2022-03-11T09:29:12Z
f82fe1d83f1a0e22025e186fdc6f4d86de0a83a1
12816261feee53cc57fab17f088ee6544ef6ffdb
Add armv8l virtualized 32-bit ARM core detection. ## Expected behavior Runtime should be able to build within 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, runtime does not know to parse as `arm` when output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies existing logics to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812) Arm support on source-build at https://github.com/dotnet/installer/pull/13378 Made as part of Alpine Linux dotnet6 packaging project, see dotnet/source-build#2782
./src/coreclr/pal/tools/smarty.sh
if ! test "$_TGTCPU" then echo !!!ERROR: _TGTCPU not set. Please run preptests.sh fi if ! test "$CORE_RUN" then echo !!!ERROR: CORE_RUN not set. Please run preptests.sh fi if ! test "$CORE_ROOT" then echo !!!ERROR: CORE_ROOT not set. Please run preptests.sh fi if ! test "$BVT_ROOT" then export BVT_ROOT=$PWD fi if [ -n "$PERL5LIB" ]; then if [ -z "`expr $PERL5LIB : ".*\($BVT_ROOT/Common/Smarty\)"`" ]; then export PERL5LIB="$PERL5LIB:$BVT_ROOT/Common/Smarty" fi else export PERL5LIB=$BVT_ROOT/Common/Smarty fi perl Common/Smarty/Smarty.pl $*
if ! test "$_TGTCPU" then echo !!!ERROR: _TGTCPU not set. Please run preptests.sh fi if ! test "$CORE_RUN" then echo !!!ERROR: CORE_RUN not set. Please run preptests.sh fi if ! test "$CORE_ROOT" then echo !!!ERROR: CORE_ROOT not set. Please run preptests.sh fi if ! test "$BVT_ROOT" then export BVT_ROOT=$PWD fi if [ -n "$PERL5LIB" ]; then if [ -z "`expr $PERL5LIB : ".*\($BVT_ROOT/Common/Smarty\)"`" ]; then export PERL5LIB="$PERL5LIB:$BVT_ROOT/Common/Smarty" fi else export PERL5LIB=$BVT_ROOT/Common/Smarty fi perl Common/Smarty/Smarty.pl $*
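For context, the smarty.sh script above only sanity-checks its environment, extends `PERL5LIB`, and forwards its arguments verbatim to `Common/Smarty/Smarty.pl`. A hypothetical manual setup (normally done by preptests.sh; the paths and the CPU value below are invented) would look like this:

```sh
# Hypothetical environment setup; preptests.sh usually exports these values.
export _TGTCPU=arm
export CORE_ROOT=/opt/tests/Core_Root          # made-up path
export CORE_RUN=$CORE_ROOT/corerun
export BVT_ROOT=$PWD
./smarty.sh    # any extra arguments are passed straight through to Smarty.pl
```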
-1
dotnet/runtime
66,477
Add armv8l virtualized 32-bit ARM core detection
## Expected behavior Runtime should be able to build within 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, runtime does not know to parse as `arm` when output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies existing logics to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812) Arm support on source-build at https://github.com/dotnet/installer/pull/13378 Made as part of Alpine Linux dotnet6 packaging project, see dotnet/source-build#2782
ayakael
2022-03-10T21:05:11Z
2022-03-11T09:29:12Z
f82fe1d83f1a0e22025e186fdc6f4d86de0a83a1
12816261feee53cc57fab17f088ee6544ef6ffdb
Add armv8l virtualized 32-bit ARM core detection. ## Expected behavior Runtime should be able to build within 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, runtime does not know to parse as `arm` when output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies existing logics to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812) Arm support on source-build at https://github.com/dotnet/installer/pull/13378 Made as part of Alpine Linux dotnet6 packaging project, see dotnet/source-build#2782
./src/libraries/Common/tests/System/Net/EnterpriseTests/setup/linuxclient/test-webserver.sh
#!/usr/bin/env bash kdestroy echo password | kinit user1 curl --verbose --negotiate -u: http://apacheweb.linux.contoso.com kdestroy nslookup github.com
#!/usr/bin/env bash kdestroy echo password | kinit user1 curl --verbose --negotiate -u: http://apacheweb.linux.contoso.com kdestroy nslookup github.com
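The test script above assumes `kinit` quietly succeeds; a hypothetical, slightly hardened variant (not part of the repository) would confirm a ticket exists before issuing the negotiate request. The host name and user are the same placeholders used in the script; the `klist -s` check is the only addition.

```sh
#!/usr/bin/env bash
# Hypothetical variant of the enterprise test client script.
kdestroy
echo password | kinit user1
klist -s || { echo "kinit did not produce a ticket for user1" >&2; exit 1; }
curl --verbose --negotiate -u: http://apacheweb.linux.contoso.com
kdestroy
nslookup github.com
```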
-1
dotnet/runtime
66,477
Add armv8l virtualized 32-bit ARM core detection
## Expected behavior Runtime should be able to build within 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, runtime does not know to parse as `arm` when output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies existing logics to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812) Arm support on source-build at https://github.com/dotnet/installer/pull/13378 Made as part of Alpine Linux dotnet6 packaging project, see dotnet/source-build#2782
ayakael
2022-03-10T21:05:11Z
2022-03-11T09:29:12Z
f82fe1d83f1a0e22025e186fdc6f4d86de0a83a1
12816261feee53cc57fab17f088ee6544ef6ffdb
Add armv8l virtualized 32-bit ARM core detection. ## Expected behavior Runtime should be able to build within 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, runtime does not know to parse as `arm` when output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies existing logics to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812) Arm support on source-build at https://github.com/dotnet/installer/pull/13378 Made as part of Alpine Linux dotnet6 packaging project, see dotnet/source-build#2782
./src/mono/mono/tests/verifier/make_ldtoken_test.sh
#! /bin/sh SED="sed" if [ `which gsed 2> /dev/null` ]; then SED="gsed" fi TEST_NAME=$1 TEST_VALIDITY=$2 TEST_OP=$3 TEST_CALL_OP=$4 TEST_FILE=`echo ${TEST_VALIDITY}_${TEST_NAME} | $SED -e 's/ /_/g' -e 's/\./_/g' -e 's/&/mp/g' -e 's/\[/_/g' -e 's/\]/_/g'`_generated.il echo $TEST_FILE TEST_TYPE1=`echo $TEST_TYPE1 | $SED -s 's/&/\\\&/'` $SED -e "s/OPCODE/${TEST_OP}/g" -e "s/VALIDITY/${TEST_VALIDITY}/g" -e "s/CALL_OP/${TEST_CALL_OP}/g" > $TEST_FILE <<//EOF .assembly extern mscorlib { .ver 2:0:0:0 .publickeytoken = (B7 7A 5C 56 19 34 E0 89 ) // .z\V.4.. } .assembly 'bla' { .hash algorithm 0x00008004 .ver 0:0:0:0 } .module ldtoken_type.exe .class public auto ansi beforefieldinit Example extends [mscorlib]System.Object { .field public int32 fld .method public static void Method () { ret } .method public static int32 Main () { .entrypoint .maxstack 8 OPCODE // VALIDITY CALL_OP pop ldc.i4.0 ret } } //EOF
#! /bin/sh SED="sed" if [ `which gsed 2> /dev/null` ]; then SED="gsed" fi TEST_NAME=$1 TEST_VALIDITY=$2 TEST_OP=$3 TEST_CALL_OP=$4 TEST_FILE=`echo ${TEST_VALIDITY}_${TEST_NAME} | $SED -e 's/ /_/g' -e 's/\./_/g' -e 's/&/mp/g' -e 's/\[/_/g' -e 's/\]/_/g'`_generated.il echo $TEST_FILE TEST_TYPE1=`echo $TEST_TYPE1 | $SED -s 's/&/\\\&/'` $SED -e "s/OPCODE/${TEST_OP}/g" -e "s/VALIDITY/${TEST_VALIDITY}/g" -e "s/CALL_OP/${TEST_CALL_OP}/g" > $TEST_FILE <<//EOF .assembly extern mscorlib { .ver 2:0:0:0 .publickeytoken = (B7 7A 5C 56 19 34 E0 89 ) // .z\V.4.. } .assembly 'bla' { .hash algorithm 0x00008004 .ver 0:0:0:0 } .module ldtoken_type.exe .class public auto ansi beforefieldinit Example extends [mscorlib]System.Object { .field public int32 fld .method public static void Method () { ret } .method public static int32 Main () { .entrypoint .maxstack 8 OPCODE // VALIDITY CALL_OP pop ldc.i4.0 ret } } //EOF
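For orientation only: the generator above takes four positional arguments (test name, validity, opcode line, follow-up call) and splices the last two into the IL template before the trailing `pop`/`ldc.i4.0`/`ret`. An invented invocation — the argument values are not taken from the real verifier test lists — would be:

```sh
# <name> <validity> <opcode> <follow-up op>; writes valid_type_token_generated.il
./make_ldtoken_test.sh type_token valid "ldtoken Example" nop
```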
-1
dotnet/runtime
66,477
Add armv8l virtualized 32-bit ARM core detection
## Expected behavior Runtime should be able to build within 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, runtime does not know to parse as `arm` when output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies existing logics to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812) Arm support on source-build at https://github.com/dotnet/installer/pull/13378 Made as part of Alpine Linux dotnet6 packaging project, see dotnet/source-build#2782
ayakael
2022-03-10T21:05:11Z
2022-03-11T09:29:12Z
f82fe1d83f1a0e22025e186fdc6f4d86de0a83a1
12816261feee53cc57fab17f088ee6544ef6ffdb
Add armv8l virtualized 32-bit ARM core detection. ## Expected behavior Runtime should be able to build within 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, runtime does not know to parse as `arm` when output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies existing logics to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812) Arm support on source-build at https://github.com/dotnet/installer/pull/13378 Made as part of Alpine Linux dotnet6 packaging project, see dotnet/source-build#2782
./src/libraries/System.Reflection.Metadata/src/System/Reflection/Metadata/Internal/StringHeap.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Diagnostics; using System.Reflection.Internal; using System.Runtime.InteropServices; using System.Text; using System.Threading; namespace System.Reflection.Metadata.Ecma335 { internal struct StringHeap { private static string[]? s_virtualValues; internal readonly MemoryBlock Block; private VirtualHeap? _lazyVirtualHeap; internal StringHeap(MemoryBlock block, MetadataKind metadataKind) { _lazyVirtualHeap = null; if (s_virtualValues == null && metadataKind != MetadataKind.Ecma335) { // Note: // Virtual values shall not contain surrogates, otherwise StartsWith might be inconsistent // when comparing to a text that ends with a high surrogate. var values = new string[(int)StringHandle.VirtualIndex.Count]; values[(int)StringHandle.VirtualIndex.System_Runtime_WindowsRuntime] = "System.Runtime.WindowsRuntime"; values[(int)StringHandle.VirtualIndex.System_Runtime] = "System.Runtime"; values[(int)StringHandle.VirtualIndex.System_ObjectModel] = "System.ObjectModel"; values[(int)StringHandle.VirtualIndex.System_Runtime_WindowsRuntime_UI_Xaml] = "System.Runtime.WindowsRuntime.UI.Xaml"; values[(int)StringHandle.VirtualIndex.System_Runtime_InteropServices_WindowsRuntime] = "System.Runtime.InteropServices.WindowsRuntime"; values[(int)StringHandle.VirtualIndex.System_Numerics_Vectors] = "System.Numerics.Vectors"; values[(int)StringHandle.VirtualIndex.Dispose] = "Dispose"; values[(int)StringHandle.VirtualIndex.AttributeTargets] = "AttributeTargets"; values[(int)StringHandle.VirtualIndex.AttributeUsageAttribute] = "AttributeUsageAttribute"; values[(int)StringHandle.VirtualIndex.Color] = "Color"; values[(int)StringHandle.VirtualIndex.CornerRadius] = "CornerRadius"; values[(int)StringHandle.VirtualIndex.DateTimeOffset] = "DateTimeOffset"; values[(int)StringHandle.VirtualIndex.Duration] = "Duration"; values[(int)StringHandle.VirtualIndex.DurationType] = "DurationType"; values[(int)StringHandle.VirtualIndex.EventHandler1] = "EventHandler`1"; values[(int)StringHandle.VirtualIndex.EventRegistrationToken] = "EventRegistrationToken"; values[(int)StringHandle.VirtualIndex.Exception] = "Exception"; values[(int)StringHandle.VirtualIndex.GeneratorPosition] = "GeneratorPosition"; values[(int)StringHandle.VirtualIndex.GridLength] = "GridLength"; values[(int)StringHandle.VirtualIndex.GridUnitType] = "GridUnitType"; values[(int)StringHandle.VirtualIndex.ICommand] = "ICommand"; values[(int)StringHandle.VirtualIndex.IDictionary2] = "IDictionary`2"; values[(int)StringHandle.VirtualIndex.IDisposable] = "IDisposable"; values[(int)StringHandle.VirtualIndex.IEnumerable] = "IEnumerable"; values[(int)StringHandle.VirtualIndex.IEnumerable1] = "IEnumerable`1"; values[(int)StringHandle.VirtualIndex.IList] = "IList"; values[(int)StringHandle.VirtualIndex.IList1] = "IList`1"; values[(int)StringHandle.VirtualIndex.INotifyCollectionChanged] = "INotifyCollectionChanged"; values[(int)StringHandle.VirtualIndex.INotifyPropertyChanged] = "INotifyPropertyChanged"; values[(int)StringHandle.VirtualIndex.IReadOnlyDictionary2] = "IReadOnlyDictionary`2"; values[(int)StringHandle.VirtualIndex.IReadOnlyList1] = "IReadOnlyList`1"; values[(int)StringHandle.VirtualIndex.KeyTime] = "KeyTime"; values[(int)StringHandle.VirtualIndex.KeyValuePair2] = "KeyValuePair`2"; values[(int)StringHandle.VirtualIndex.Matrix] = "Matrix"; values[(int)StringHandle.VirtualIndex.Matrix3D] = "Matrix3D"; 
values[(int)StringHandle.VirtualIndex.Matrix3x2] = "Matrix3x2"; values[(int)StringHandle.VirtualIndex.Matrix4x4] = "Matrix4x4"; values[(int)StringHandle.VirtualIndex.NotifyCollectionChangedAction] = "NotifyCollectionChangedAction"; values[(int)StringHandle.VirtualIndex.NotifyCollectionChangedEventArgs] = "NotifyCollectionChangedEventArgs"; values[(int)StringHandle.VirtualIndex.NotifyCollectionChangedEventHandler] = "NotifyCollectionChangedEventHandler"; values[(int)StringHandle.VirtualIndex.Nullable1] = "Nullable`1"; values[(int)StringHandle.VirtualIndex.Plane] = "Plane"; values[(int)StringHandle.VirtualIndex.Point] = "Point"; values[(int)StringHandle.VirtualIndex.PropertyChangedEventArgs] = "PropertyChangedEventArgs"; values[(int)StringHandle.VirtualIndex.PropertyChangedEventHandler] = "PropertyChangedEventHandler"; values[(int)StringHandle.VirtualIndex.Quaternion] = "Quaternion"; values[(int)StringHandle.VirtualIndex.Rect] = "Rect"; values[(int)StringHandle.VirtualIndex.RepeatBehavior] = "RepeatBehavior"; values[(int)StringHandle.VirtualIndex.RepeatBehaviorType] = "RepeatBehaviorType"; values[(int)StringHandle.VirtualIndex.Size] = "Size"; values[(int)StringHandle.VirtualIndex.System] = "System"; values[(int)StringHandle.VirtualIndex.System_Collections] = "System.Collections"; values[(int)StringHandle.VirtualIndex.System_Collections_Generic] = "System.Collections.Generic"; values[(int)StringHandle.VirtualIndex.System_Collections_Specialized] = "System.Collections.Specialized"; values[(int)StringHandle.VirtualIndex.System_ComponentModel] = "System.ComponentModel"; values[(int)StringHandle.VirtualIndex.System_Numerics] = "System.Numerics"; values[(int)StringHandle.VirtualIndex.System_Windows_Input] = "System.Windows.Input"; values[(int)StringHandle.VirtualIndex.Thickness] = "Thickness"; values[(int)StringHandle.VirtualIndex.TimeSpan] = "TimeSpan"; values[(int)StringHandle.VirtualIndex.Type] = "Type"; values[(int)StringHandle.VirtualIndex.Uri] = "Uri"; values[(int)StringHandle.VirtualIndex.Vector2] = "Vector2"; values[(int)StringHandle.VirtualIndex.Vector3] = "Vector3"; values[(int)StringHandle.VirtualIndex.Vector4] = "Vector4"; values[(int)StringHandle.VirtualIndex.Windows_Foundation] = "Windows.Foundation"; values[(int)StringHandle.VirtualIndex.Windows_UI] = "Windows.UI"; values[(int)StringHandle.VirtualIndex.Windows_UI_Xaml] = "Windows.UI.Xaml"; values[(int)StringHandle.VirtualIndex.Windows_UI_Xaml_Controls_Primitives] = "Windows.UI.Xaml.Controls.Primitives"; values[(int)StringHandle.VirtualIndex.Windows_UI_Xaml_Media] = "Windows.UI.Xaml.Media"; values[(int)StringHandle.VirtualIndex.Windows_UI_Xaml_Media_Animation] = "Windows.UI.Xaml.Media.Animation"; values[(int)StringHandle.VirtualIndex.Windows_UI_Xaml_Media_Media3D] = "Windows.UI.Xaml.Media.Media3D"; s_virtualValues = values; AssertFilled(); } this.Block = TrimEnd(block); } [Conditional("DEBUG")] private static void AssertFilled() { for (int i = 0; i < s_virtualValues!.Length; i++) { Debug.Assert(s_virtualValues[i] != null, $"Missing virtual value for StringHandle.VirtualIndex.{(StringHandle.VirtualIndex)i}"); } } // Trims the alignment padding of the heap. // See StgStringPool::InitOnMem in ndp\clr\src\Utilcode\StgPool.cpp. // This is especially important for EnC. 
private static MemoryBlock TrimEnd(MemoryBlock block) { if (block.Length == 0) { return block; } int i = block.Length - 1; while (i >= 0 && block.PeekByte(i) == 0) { i--; } // this shouldn't happen in valid metadata: if (i == block.Length - 1) { return block; } // +1 for terminating \0 return block.GetMemoryBlockAt(0, i + 2); } internal string GetString(StringHandle handle, MetadataStringDecoder utf8Decoder) { return handle.IsVirtual ? GetVirtualHandleString(handle, utf8Decoder) : GetNonVirtualString(handle, utf8Decoder, prefixOpt: null); } internal MemoryBlock GetMemoryBlock(StringHandle handle) { return handle.IsVirtual ? GetVirtualHandleMemoryBlock(handle) : GetNonVirtualStringMemoryBlock(handle); } internal static string GetVirtualString(StringHandle.VirtualIndex index) { return s_virtualValues![(int)index]; } private string GetNonVirtualString(StringHandle handle, MetadataStringDecoder utf8Decoder, byte[]? prefixOpt) { Debug.Assert(handle.StringKind != StringKind.Virtual); char otherTerminator = handle.StringKind == StringKind.DotTerminated ? '.' : '\0'; return Block.PeekUtf8NullTerminated(handle.GetHeapOffset(), prefixOpt, utf8Decoder, out _, otherTerminator); } private unsafe MemoryBlock GetNonVirtualStringMemoryBlock(StringHandle handle) { Debug.Assert(handle.StringKind != StringKind.Virtual); char otherTerminator = handle.StringKind == StringKind.DotTerminated ? '.' : '\0'; int offset = handle.GetHeapOffset(); int length = Block.GetUtf8NullTerminatedLength(offset, out _, otherTerminator); return new MemoryBlock(Block.Pointer + offset, length); } private unsafe byte[] GetNonVirtualStringBytes(StringHandle handle, byte[] prefix) { Debug.Assert(handle.StringKind != StringKind.Virtual); var block = GetNonVirtualStringMemoryBlock(handle); var bytes = new byte[prefix.Length + block.Length]; Buffer.BlockCopy(prefix, 0, bytes, 0, prefix.Length); Marshal.Copy((IntPtr)block.Pointer, bytes, prefix.Length, block.Length); return bytes; } private string GetVirtualHandleString(StringHandle handle, MetadataStringDecoder utf8Decoder) { Debug.Assert(handle.IsVirtual); return handle.StringKind switch { StringKind.Virtual => GetVirtualString(handle.GetVirtualIndex()), StringKind.WinRTPrefixed => GetNonVirtualString(handle, utf8Decoder, MetadataReader.WinRTPrefix), _ => throw ExceptionUtilities.UnexpectedValue(handle.StringKind), }; } private MemoryBlock GetVirtualHandleMemoryBlock(StringHandle handle) { Debug.Assert(handle.IsVirtual); var heap = VirtualHeap.GetOrCreateVirtualHeap(ref _lazyVirtualHeap); lock (heap) { if (!heap.TryGetMemoryBlock(handle.RawValue, out var block)) { byte[] bytes = handle.StringKind switch { StringKind.Virtual => Encoding.UTF8.GetBytes(GetVirtualString(handle.GetVirtualIndex())), StringKind.WinRTPrefixed => GetNonVirtualStringBytes(handle, MetadataReader.WinRTPrefix), _ => throw ExceptionUtilities.UnexpectedValue(handle.StringKind), }; block = heap.AddBlob(handle.RawValue, bytes); } return block; } } internal BlobReader GetBlobReader(StringHandle handle) { return new BlobReader(GetMemoryBlock(handle)); } internal StringHandle GetNextHandle(StringHandle handle) { if (handle.IsVirtual) { return default(StringHandle); } int terminator = this.Block.IndexOf(0, handle.GetHeapOffset()); if (terminator == -1 || terminator == Block.Length - 1) { return default(StringHandle); } return StringHandle.FromOffset(terminator + 1); } internal bool Equals(StringHandle handle, string value, MetadataStringDecoder utf8Decoder, bool ignoreCase) { Debug.Assert(value != null); if 
(handle.IsVirtual) { // TODO: This can allocate unnecessarily for <WinRT> prefixed handles. return string.Equals(GetString(handle, utf8Decoder), value, ignoreCase ? StringComparison.OrdinalIgnoreCase : StringComparison.Ordinal); } if (handle.IsNil) { return value.Length == 0; } char otherTerminator = handle.StringKind == StringKind.DotTerminated ? '.' : '\0'; return this.Block.Utf8NullTerminatedEquals(handle.GetHeapOffset(), value, utf8Decoder, otherTerminator, ignoreCase); } internal bool StartsWith(StringHandle handle, string value, MetadataStringDecoder utf8Decoder, bool ignoreCase) { Debug.Assert(value != null); if (handle.IsVirtual) { // TODO: This can allocate unnecessarily for <WinRT> prefixed handles. return GetString(handle, utf8Decoder).StartsWith(value, ignoreCase ? StringComparison.OrdinalIgnoreCase : StringComparison.Ordinal); } if (handle.IsNil) { return value.Length == 0; } char otherTerminator = handle.StringKind == StringKind.DotTerminated ? '.' : '\0'; return this.Block.Utf8NullTerminatedStartsWith(handle.GetHeapOffset(), value, utf8Decoder, otherTerminator, ignoreCase); } /// <summary> /// Returns true if the given raw (non-virtual) handle represents the same string as given ASCII string. /// </summary> internal bool EqualsRaw(StringHandle rawHandle, string asciiString) { Debug.Assert(!rawHandle.IsVirtual); Debug.Assert(rawHandle.StringKind != StringKind.DotTerminated, "Not supported"); return this.Block.CompareUtf8NullTerminatedStringWithAsciiString(rawHandle.GetHeapOffset(), asciiString) == 0; } /// <summary> /// Returns the heap index of the given ASCII character or -1 if not found prior null terminator or end of heap. /// </summary> internal int IndexOfRaw(int startIndex, char asciiChar) { Debug.Assert(asciiChar != 0 && asciiChar <= 0x7f); return this.Block.Utf8NullTerminatedOffsetOfAsciiChar(startIndex, asciiChar); } /// <summary> /// Returns true if the given raw (non-virtual) handle represents a string that starts with given ASCII prefix. /// </summary> internal bool StartsWithRaw(StringHandle rawHandle, string asciiPrefix) { Debug.Assert(!rawHandle.IsVirtual); Debug.Assert(rawHandle.StringKind != StringKind.DotTerminated, "Not supported"); return this.Block.Utf8NullTerminatedStringStartsWithAsciiPrefix(rawHandle.GetHeapOffset(), asciiPrefix); } /// <summary> /// Equivalent to Array.BinarySearch, searches for given raw (non-virtual) handle in given array of ASCII strings. /// </summary> internal int BinarySearchRaw(string[] asciiKeys, StringHandle rawHandle) { Debug.Assert(!rawHandle.IsVirtual); Debug.Assert(rawHandle.StringKind != StringKind.DotTerminated, "Not supported"); return this.Block.BinarySearch(asciiKeys, rawHandle.GetHeapOffset()); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Diagnostics; using System.Reflection.Internal; using System.Runtime.InteropServices; using System.Text; using System.Threading; namespace System.Reflection.Metadata.Ecma335 { internal struct StringHeap { private static string[]? s_virtualValues; internal readonly MemoryBlock Block; private VirtualHeap? _lazyVirtualHeap; internal StringHeap(MemoryBlock block, MetadataKind metadataKind) { _lazyVirtualHeap = null; if (s_virtualValues == null && metadataKind != MetadataKind.Ecma335) { // Note: // Virtual values shall not contain surrogates, otherwise StartsWith might be inconsistent // when comparing to a text that ends with a high surrogate. var values = new string[(int)StringHandle.VirtualIndex.Count]; values[(int)StringHandle.VirtualIndex.System_Runtime_WindowsRuntime] = "System.Runtime.WindowsRuntime"; values[(int)StringHandle.VirtualIndex.System_Runtime] = "System.Runtime"; values[(int)StringHandle.VirtualIndex.System_ObjectModel] = "System.ObjectModel"; values[(int)StringHandle.VirtualIndex.System_Runtime_WindowsRuntime_UI_Xaml] = "System.Runtime.WindowsRuntime.UI.Xaml"; values[(int)StringHandle.VirtualIndex.System_Runtime_InteropServices_WindowsRuntime] = "System.Runtime.InteropServices.WindowsRuntime"; values[(int)StringHandle.VirtualIndex.System_Numerics_Vectors] = "System.Numerics.Vectors"; values[(int)StringHandle.VirtualIndex.Dispose] = "Dispose"; values[(int)StringHandle.VirtualIndex.AttributeTargets] = "AttributeTargets"; values[(int)StringHandle.VirtualIndex.AttributeUsageAttribute] = "AttributeUsageAttribute"; values[(int)StringHandle.VirtualIndex.Color] = "Color"; values[(int)StringHandle.VirtualIndex.CornerRadius] = "CornerRadius"; values[(int)StringHandle.VirtualIndex.DateTimeOffset] = "DateTimeOffset"; values[(int)StringHandle.VirtualIndex.Duration] = "Duration"; values[(int)StringHandle.VirtualIndex.DurationType] = "DurationType"; values[(int)StringHandle.VirtualIndex.EventHandler1] = "EventHandler`1"; values[(int)StringHandle.VirtualIndex.EventRegistrationToken] = "EventRegistrationToken"; values[(int)StringHandle.VirtualIndex.Exception] = "Exception"; values[(int)StringHandle.VirtualIndex.GeneratorPosition] = "GeneratorPosition"; values[(int)StringHandle.VirtualIndex.GridLength] = "GridLength"; values[(int)StringHandle.VirtualIndex.GridUnitType] = "GridUnitType"; values[(int)StringHandle.VirtualIndex.ICommand] = "ICommand"; values[(int)StringHandle.VirtualIndex.IDictionary2] = "IDictionary`2"; values[(int)StringHandle.VirtualIndex.IDisposable] = "IDisposable"; values[(int)StringHandle.VirtualIndex.IEnumerable] = "IEnumerable"; values[(int)StringHandle.VirtualIndex.IEnumerable1] = "IEnumerable`1"; values[(int)StringHandle.VirtualIndex.IList] = "IList"; values[(int)StringHandle.VirtualIndex.IList1] = "IList`1"; values[(int)StringHandle.VirtualIndex.INotifyCollectionChanged] = "INotifyCollectionChanged"; values[(int)StringHandle.VirtualIndex.INotifyPropertyChanged] = "INotifyPropertyChanged"; values[(int)StringHandle.VirtualIndex.IReadOnlyDictionary2] = "IReadOnlyDictionary`2"; values[(int)StringHandle.VirtualIndex.IReadOnlyList1] = "IReadOnlyList`1"; values[(int)StringHandle.VirtualIndex.KeyTime] = "KeyTime"; values[(int)StringHandle.VirtualIndex.KeyValuePair2] = "KeyValuePair`2"; values[(int)StringHandle.VirtualIndex.Matrix] = "Matrix"; values[(int)StringHandle.VirtualIndex.Matrix3D] = "Matrix3D"; 
values[(int)StringHandle.VirtualIndex.Matrix3x2] = "Matrix3x2"; values[(int)StringHandle.VirtualIndex.Matrix4x4] = "Matrix4x4"; values[(int)StringHandle.VirtualIndex.NotifyCollectionChangedAction] = "NotifyCollectionChangedAction"; values[(int)StringHandle.VirtualIndex.NotifyCollectionChangedEventArgs] = "NotifyCollectionChangedEventArgs"; values[(int)StringHandle.VirtualIndex.NotifyCollectionChangedEventHandler] = "NotifyCollectionChangedEventHandler"; values[(int)StringHandle.VirtualIndex.Nullable1] = "Nullable`1"; values[(int)StringHandle.VirtualIndex.Plane] = "Plane"; values[(int)StringHandle.VirtualIndex.Point] = "Point"; values[(int)StringHandle.VirtualIndex.PropertyChangedEventArgs] = "PropertyChangedEventArgs"; values[(int)StringHandle.VirtualIndex.PropertyChangedEventHandler] = "PropertyChangedEventHandler"; values[(int)StringHandle.VirtualIndex.Quaternion] = "Quaternion"; values[(int)StringHandle.VirtualIndex.Rect] = "Rect"; values[(int)StringHandle.VirtualIndex.RepeatBehavior] = "RepeatBehavior"; values[(int)StringHandle.VirtualIndex.RepeatBehaviorType] = "RepeatBehaviorType"; values[(int)StringHandle.VirtualIndex.Size] = "Size"; values[(int)StringHandle.VirtualIndex.System] = "System"; values[(int)StringHandle.VirtualIndex.System_Collections] = "System.Collections"; values[(int)StringHandle.VirtualIndex.System_Collections_Generic] = "System.Collections.Generic"; values[(int)StringHandle.VirtualIndex.System_Collections_Specialized] = "System.Collections.Specialized"; values[(int)StringHandle.VirtualIndex.System_ComponentModel] = "System.ComponentModel"; values[(int)StringHandle.VirtualIndex.System_Numerics] = "System.Numerics"; values[(int)StringHandle.VirtualIndex.System_Windows_Input] = "System.Windows.Input"; values[(int)StringHandle.VirtualIndex.Thickness] = "Thickness"; values[(int)StringHandle.VirtualIndex.TimeSpan] = "TimeSpan"; values[(int)StringHandle.VirtualIndex.Type] = "Type"; values[(int)StringHandle.VirtualIndex.Uri] = "Uri"; values[(int)StringHandle.VirtualIndex.Vector2] = "Vector2"; values[(int)StringHandle.VirtualIndex.Vector3] = "Vector3"; values[(int)StringHandle.VirtualIndex.Vector4] = "Vector4"; values[(int)StringHandle.VirtualIndex.Windows_Foundation] = "Windows.Foundation"; values[(int)StringHandle.VirtualIndex.Windows_UI] = "Windows.UI"; values[(int)StringHandle.VirtualIndex.Windows_UI_Xaml] = "Windows.UI.Xaml"; values[(int)StringHandle.VirtualIndex.Windows_UI_Xaml_Controls_Primitives] = "Windows.UI.Xaml.Controls.Primitives"; values[(int)StringHandle.VirtualIndex.Windows_UI_Xaml_Media] = "Windows.UI.Xaml.Media"; values[(int)StringHandle.VirtualIndex.Windows_UI_Xaml_Media_Animation] = "Windows.UI.Xaml.Media.Animation"; values[(int)StringHandle.VirtualIndex.Windows_UI_Xaml_Media_Media3D] = "Windows.UI.Xaml.Media.Media3D"; s_virtualValues = values; AssertFilled(); } this.Block = TrimEnd(block); } [Conditional("DEBUG")] private static void AssertFilled() { for (int i = 0; i < s_virtualValues!.Length; i++) { Debug.Assert(s_virtualValues[i] != null, $"Missing virtual value for StringHandle.VirtualIndex.{(StringHandle.VirtualIndex)i}"); } } // Trims the alignment padding of the heap. // See StgStringPool::InitOnMem in ndp\clr\src\Utilcode\StgPool.cpp. // This is especially important for EnC. 
private static MemoryBlock TrimEnd(MemoryBlock block) { if (block.Length == 0) { return block; } int i = block.Length - 1; while (i >= 0 && block.PeekByte(i) == 0) { i--; } // this shouldn't happen in valid metadata: if (i == block.Length - 1) { return block; } // +1 for terminating \0 return block.GetMemoryBlockAt(0, i + 2); } internal string GetString(StringHandle handle, MetadataStringDecoder utf8Decoder) { return handle.IsVirtual ? GetVirtualHandleString(handle, utf8Decoder) : GetNonVirtualString(handle, utf8Decoder, prefixOpt: null); } internal MemoryBlock GetMemoryBlock(StringHandle handle) { return handle.IsVirtual ? GetVirtualHandleMemoryBlock(handle) : GetNonVirtualStringMemoryBlock(handle); } internal static string GetVirtualString(StringHandle.VirtualIndex index) { return s_virtualValues![(int)index]; } private string GetNonVirtualString(StringHandle handle, MetadataStringDecoder utf8Decoder, byte[]? prefixOpt) { Debug.Assert(handle.StringKind != StringKind.Virtual); char otherTerminator = handle.StringKind == StringKind.DotTerminated ? '.' : '\0'; return Block.PeekUtf8NullTerminated(handle.GetHeapOffset(), prefixOpt, utf8Decoder, out _, otherTerminator); } private unsafe MemoryBlock GetNonVirtualStringMemoryBlock(StringHandle handle) { Debug.Assert(handle.StringKind != StringKind.Virtual); char otherTerminator = handle.StringKind == StringKind.DotTerminated ? '.' : '\0'; int offset = handle.GetHeapOffset(); int length = Block.GetUtf8NullTerminatedLength(offset, out _, otherTerminator); return new MemoryBlock(Block.Pointer + offset, length); } private unsafe byte[] GetNonVirtualStringBytes(StringHandle handle, byte[] prefix) { Debug.Assert(handle.StringKind != StringKind.Virtual); var block = GetNonVirtualStringMemoryBlock(handle); var bytes = new byte[prefix.Length + block.Length]; Buffer.BlockCopy(prefix, 0, bytes, 0, prefix.Length); Marshal.Copy((IntPtr)block.Pointer, bytes, prefix.Length, block.Length); return bytes; } private string GetVirtualHandleString(StringHandle handle, MetadataStringDecoder utf8Decoder) { Debug.Assert(handle.IsVirtual); return handle.StringKind switch { StringKind.Virtual => GetVirtualString(handle.GetVirtualIndex()), StringKind.WinRTPrefixed => GetNonVirtualString(handle, utf8Decoder, MetadataReader.WinRTPrefix), _ => throw ExceptionUtilities.UnexpectedValue(handle.StringKind), }; } private MemoryBlock GetVirtualHandleMemoryBlock(StringHandle handle) { Debug.Assert(handle.IsVirtual); var heap = VirtualHeap.GetOrCreateVirtualHeap(ref _lazyVirtualHeap); lock (heap) { if (!heap.TryGetMemoryBlock(handle.RawValue, out var block)) { byte[] bytes = handle.StringKind switch { StringKind.Virtual => Encoding.UTF8.GetBytes(GetVirtualString(handle.GetVirtualIndex())), StringKind.WinRTPrefixed => GetNonVirtualStringBytes(handle, MetadataReader.WinRTPrefix), _ => throw ExceptionUtilities.UnexpectedValue(handle.StringKind), }; block = heap.AddBlob(handle.RawValue, bytes); } return block; } } internal BlobReader GetBlobReader(StringHandle handle) { return new BlobReader(GetMemoryBlock(handle)); } internal StringHandle GetNextHandle(StringHandle handle) { if (handle.IsVirtual) { return default(StringHandle); } int terminator = this.Block.IndexOf(0, handle.GetHeapOffset()); if (terminator == -1 || terminator == Block.Length - 1) { return default(StringHandle); } return StringHandle.FromOffset(terminator + 1); } internal bool Equals(StringHandle handle, string value, MetadataStringDecoder utf8Decoder, bool ignoreCase) { Debug.Assert(value != null); if 
(handle.IsVirtual) { // TODO: This can allocate unnecessarily for <WinRT> prefixed handles. return string.Equals(GetString(handle, utf8Decoder), value, ignoreCase ? StringComparison.OrdinalIgnoreCase : StringComparison.Ordinal); } if (handle.IsNil) { return value.Length == 0; } char otherTerminator = handle.StringKind == StringKind.DotTerminated ? '.' : '\0'; return this.Block.Utf8NullTerminatedEquals(handle.GetHeapOffset(), value, utf8Decoder, otherTerminator, ignoreCase); } internal bool StartsWith(StringHandle handle, string value, MetadataStringDecoder utf8Decoder, bool ignoreCase) { Debug.Assert(value != null); if (handle.IsVirtual) { // TODO: This can allocate unnecessarily for <WinRT> prefixed handles. return GetString(handle, utf8Decoder).StartsWith(value, ignoreCase ? StringComparison.OrdinalIgnoreCase : StringComparison.Ordinal); } if (handle.IsNil) { return value.Length == 0; } char otherTerminator = handle.StringKind == StringKind.DotTerminated ? '.' : '\0'; return this.Block.Utf8NullTerminatedStartsWith(handle.GetHeapOffset(), value, utf8Decoder, otherTerminator, ignoreCase); } /// <summary> /// Returns true if the given raw (non-virtual) handle represents the same string as given ASCII string. /// </summary> internal bool EqualsRaw(StringHandle rawHandle, string asciiString) { Debug.Assert(!rawHandle.IsVirtual); Debug.Assert(rawHandle.StringKind != StringKind.DotTerminated, "Not supported"); return this.Block.CompareUtf8NullTerminatedStringWithAsciiString(rawHandle.GetHeapOffset(), asciiString) == 0; } /// <summary> /// Returns the heap index of the given ASCII character or -1 if not found prior null terminator or end of heap. /// </summary> internal int IndexOfRaw(int startIndex, char asciiChar) { Debug.Assert(asciiChar != 0 && asciiChar <= 0x7f); return this.Block.Utf8NullTerminatedOffsetOfAsciiChar(startIndex, asciiChar); } /// <summary> /// Returns true if the given raw (non-virtual) handle represents a string that starts with given ASCII prefix. /// </summary> internal bool StartsWithRaw(StringHandle rawHandle, string asciiPrefix) { Debug.Assert(!rawHandle.IsVirtual); Debug.Assert(rawHandle.StringKind != StringKind.DotTerminated, "Not supported"); return this.Block.Utf8NullTerminatedStringStartsWithAsciiPrefix(rawHandle.GetHeapOffset(), asciiPrefix); } /// <summary> /// Equivalent to Array.BinarySearch, searches for given raw (non-virtual) handle in given array of ASCII strings. /// </summary> internal int BinarySearchRaw(string[] asciiKeys, StringHandle rawHandle) { Debug.Assert(!rawHandle.IsVirtual); Debug.Assert(rawHandle.StringKind != StringKind.DotTerminated, "Not supported"); return this.Block.BinarySearch(asciiKeys, rawHandle.GetHeapOffset()); } } }
-1
dotnet/runtime
66,477
Add armv8l virtualized 32-bit ARM core detection
## Expected behavior Runtime should be able to build within 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, runtime does not know to parse as `arm` when output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies existing logics to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812) Arm support on source-build at https://github.com/dotnet/installer/pull/13378 Made as part of Alpine Linux dotnet6 packaging project, see dotnet/source-build#2782
ayakael
2022-03-10T21:05:11Z
2022-03-11T09:29:12Z
f82fe1d83f1a0e22025e186fdc6f4d86de0a83a1
12816261feee53cc57fab17f088ee6544ef6ffdb
Add armv8l virtualized 32-bit ARM core detection. ## Expected behavior Runtime should be able to build within 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, runtime does not know to parse as `arm` when output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies existing logics to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812) Arm support on source-build at https://github.com/dotnet/installer/pull/13378 Made as part of Alpine Linux dotnet6 packaging project, see dotnet/source-build#2782
./src/tests/JIT/HardwareIntrinsics/Arm/Shared/_UnaryOpScalarTestTemplate.template
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void {TestName}() { var test = new {TemplateName}UnaryOpTest__{TestName}(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.ReadUnaligned test.RunBasicScenario_UnsafeRead(); // Validates calling via reflection works, using Unsafe.ReadUnaligned test.RunReflectionScenario_UnsafeRead(); // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.ReadUnaligned test.RunLclVarScenario_UnsafeRead(); // Validates passing the field of a local class works test.RunClassLclFldScenario(); // Validates passing an instance member of a class works test.RunClassFldScenario(); // Validates passing the field of a local struct works test.RunStructLclFldScenario(); // Validates passing an instance member of a struct works test.RunStructFldScenario(); } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class {TemplateName}UnaryOpTest__{TestName} { private struct DataTable { private byte[] outArray; private GCHandle outHandle; private ulong alignment; public DataTable({RetBaseType}[] outArray, int alignment) { int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<{RetBaseType}>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.outArray = new byte[alignment * 2]; this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; } public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public {Op1BaseType} _fld; public static TestStruct Create() { var testStruct = new TestStruct(); testStruct._fld = {NextValueOp1}; return testStruct; } public void RunStructFldScenario({TemplateName}UnaryOpTest__{TestName} testClass) { var result = {Isa}.{Method}(_fld); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld, testClass._dataTable.outArrayPtr); } } private static readonly int LargestVectorSize = {LargestVectorSize}; private static readonly int RetElementCount = Unsafe.SizeOf<{RetVectorType}<{RetBaseType}>>() / sizeof({RetBaseType}); private static {Op1BaseType} _data; private static {Op1BaseType} _clsVar; private {Op1BaseType} _fld; private DataTable _dataTable; static {TemplateName}UnaryOpTest__{TestName}() { _clsVar = {NextValueOp1}; 
} public {TemplateName}UnaryOpTest__{TestName}() { Succeeded = true; _fld = {NextValueOp1}; _data = {NextValueOp1}; _dataTable = new DataTable(new {RetBaseType}[RetElementCount], LargestVectorSize); } public bool IsSupported => {Isa}.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = {Isa}.{Method}( Unsafe.ReadUnaligned<{Op1BaseType}>(ref Unsafe.As<{Op1BaseType}, byte>(ref _data)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_data, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof({Isa}).GetMethod(nameof({Isa}.{Method}), new Type[] { typeof({Op1BaseType}) }) .Invoke(null, new object[] { Unsafe.ReadUnaligned<{Op1BaseType}>(ref Unsafe.As<{Op1BaseType}, byte>(ref _data)) }); Unsafe.Write(_dataTable.outArrayPtr, ({RetVectorType}<{RetBaseType}>)(result)); ValidateResult(_data, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = {Isa}.{Method}( _clsVar ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar, _dataTable.outArrayPtr); } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var data = Unsafe.ReadUnaligned<{Op1BaseType}>(ref Unsafe.As<{Op1BaseType}, byte>(ref _data)); var result = {Isa}.{Method}(data); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(data, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new {TemplateName}UnaryOpTest__{TestName}(); var result = {Isa}.{Method}(test._fld); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = {Isa}.{Method}(_fld); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld, _dataTable.outArrayPtr); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = {Isa}.{Method}(test._fld); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult({Op1BaseType} data, void* result, [CallerMemberName] string method = "") { {RetBaseType}[] outArray = new {RetBaseType}[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<{RetBaseType}, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<{RetVectorType}<{RetBaseType}>>()); ValidateResult(data, outArray, method); } private void ValidateResult({Op1BaseType} data, {RetBaseType}[] result, [CallerMemberName] string method = "") { bool succeeded = true; {TemplateValidationLogic} if (!succeeded) { 
TestLibrary.TestFramework.LogInformation($"{nameof({Isa})}.{nameof({Isa}.{Method})}<{RetBaseType}>({Op1BaseType}): {Method} failed:"); TestLibrary.TestFramework.LogInformation($" data: {data}"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void {TestName}() { var test = new {TemplateName}UnaryOpTest__{TestName}(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.ReadUnaligned test.RunBasicScenario_UnsafeRead(); // Validates calling via reflection works, using Unsafe.ReadUnaligned test.RunReflectionScenario_UnsafeRead(); // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.ReadUnaligned test.RunLclVarScenario_UnsafeRead(); // Validates passing the field of a local class works test.RunClassLclFldScenario(); // Validates passing an instance member of a class works test.RunClassFldScenario(); // Validates passing the field of a local struct works test.RunStructLclFldScenario(); // Validates passing an instance member of a struct works test.RunStructFldScenario(); } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class {TemplateName}UnaryOpTest__{TestName} { private struct DataTable { private byte[] outArray; private GCHandle outHandle; private ulong alignment; public DataTable({RetBaseType}[] outArray, int alignment) { int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<{RetBaseType}>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.outArray = new byte[alignment * 2]; this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; } public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public {Op1BaseType} _fld; public static TestStruct Create() { var testStruct = new TestStruct(); testStruct._fld = {NextValueOp1}; return testStruct; } public void RunStructFldScenario({TemplateName}UnaryOpTest__{TestName} testClass) { var result = {Isa}.{Method}(_fld); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld, testClass._dataTable.outArrayPtr); } } private static readonly int LargestVectorSize = {LargestVectorSize}; private static readonly int RetElementCount = Unsafe.SizeOf<{RetVectorType}<{RetBaseType}>>() / sizeof({RetBaseType}); private static {Op1BaseType} _data; private static {Op1BaseType} _clsVar; private {Op1BaseType} _fld; private DataTable _dataTable; static {TemplateName}UnaryOpTest__{TestName}() { _clsVar = {NextValueOp1}; 
} public {TemplateName}UnaryOpTest__{TestName}() { Succeeded = true; _fld = {NextValueOp1}; _data = {NextValueOp1}; _dataTable = new DataTable(new {RetBaseType}[RetElementCount], LargestVectorSize); } public bool IsSupported => {Isa}.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = {Isa}.{Method}( Unsafe.ReadUnaligned<{Op1BaseType}>(ref Unsafe.As<{Op1BaseType}, byte>(ref _data)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_data, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof({Isa}).GetMethod(nameof({Isa}.{Method}), new Type[] { typeof({Op1BaseType}) }) .Invoke(null, new object[] { Unsafe.ReadUnaligned<{Op1BaseType}>(ref Unsafe.As<{Op1BaseType}, byte>(ref _data)) }); Unsafe.Write(_dataTable.outArrayPtr, ({RetVectorType}<{RetBaseType}>)(result)); ValidateResult(_data, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = {Isa}.{Method}( _clsVar ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar, _dataTable.outArrayPtr); } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var data = Unsafe.ReadUnaligned<{Op1BaseType}>(ref Unsafe.As<{Op1BaseType}, byte>(ref _data)); var result = {Isa}.{Method}(data); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(data, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new {TemplateName}UnaryOpTest__{TestName}(); var result = {Isa}.{Method}(test._fld); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = {Isa}.{Method}(_fld); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld, _dataTable.outArrayPtr); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = {Isa}.{Method}(test._fld); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult({Op1BaseType} data, void* result, [CallerMemberName] string method = "") { {RetBaseType}[] outArray = new {RetBaseType}[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<{RetBaseType}, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<{RetVectorType}<{RetBaseType}>>()); ValidateResult(data, outArray, method); } private void ValidateResult({Op1BaseType} data, {RetBaseType}[] result, [CallerMemberName] string method = "") { bool succeeded = true; {TemplateValidationLogic} if (!succeeded) { 
TestLibrary.TestFramework.LogInformation($"{nameof({Isa})}.{nameof({Isa}.{Method})}<{RetBaseType}>({Op1BaseType}): {Method} failed:"); TestLibrary.TestFramework.LogInformation($" data: {data}"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
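The generated test record above relies on its DataTable handing the intrinsic an aligned output buffer: it pins a backing array of `alignment * 2` bytes and rounds the pinned address up with the bit trick `(addr + alignment - 1) & ~(alignment - 1)`. As a minimal standalone sketch of that alignment idea (illustrative names only, compiled with unsafe code enabled; not part of the generated template):

```csharp
// Sketch of the pointer-alignment trick used by the test template's DataTable.
// Assumes a power-of-two alignment; type and member names here are hypothetical.
using System;
using System.Runtime.InteropServices;

public static class AlignmentSketch
{
    // Rounds `address` up to the next multiple of `alignment` (alignment must be a power of two).
    public static ulong AlignUp(ulong address, ulong alignment)
        => (address + alignment - 1) & ~(alignment - 1);

    public static unsafe void Main()
    {
        const int Alignment = 16;
        // Over-allocate twice the alignment so an aligned window of Alignment bytes always fits,
        // mirroring the `new byte[alignment * 2]` allocation in the generated DataTable.
        byte[] backing = new byte[Alignment * 2];
        GCHandle handle = GCHandle.Alloc(backing, GCHandleType.Pinned);
        try
        {
            byte* raw = (byte*)handle.AddrOfPinnedObject();
            byte* aligned = (byte*)AlignUp((ulong)raw, Alignment);
            Console.WriteLine($"raw: {(ulong)raw:X}, aligned: {(ulong)aligned:X}, remainder: {(ulong)aligned % Alignment}");
        }
        finally
        {
            handle.Free();
        }
    }
}
```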
dotnet/runtime
66,477
Add armv8l virtualized 32-bit ARM core detection
## Expected behavior Runtime should be able to build within 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse as `arm` when the output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812) Arm support on source-build at https://github.com/dotnet/installer/pull/13378 Made as part of the Alpine Linux dotnet6 packaging project, see dotnet/source-build#2782
ayakael
2022-03-10T21:05:11Z
2022-03-11T09:29:12Z
f82fe1d83f1a0e22025e186fdc6f4d86de0a83a1
12816261feee53cc57fab17f088ee6544ef6ffdb
Add armv8l virtualized 32-bit ARM core detection. ## Expected behavior Runtime should be able to build within 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse as `arm` when the output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812) Arm support on source-build at https://github.com/dotnet/installer/pull/13378 Made as part of the Alpine Linux dotnet6 packaging project, see dotnet/source-build#2782
./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd/ShiftLogical.Vector128.Int64.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void ShiftLogical_Vector128_Int64() { var test = new SimpleBinaryOpTest__ShiftLogical_Vector128_Int64(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleBinaryOpTest__ShiftLogical_Vector128_Int64 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(Int64[] inArray1, Int64[] inArray2, Int64[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int64>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int64>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int64>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < 
sizeOfinArray2 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int64, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int64, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<Int64> _fld1; public Vector128<Int64> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref testStruct._fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref testStruct._fld2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); return testStruct; } public void RunStructFldScenario(SimpleBinaryOpTest__ShiftLogical_Vector128_Int64 testClass) { var result = AdvSimd.ShiftLogical(_fld1, _fld2); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(SimpleBinaryOpTest__ShiftLogical_Vector128_Int64 testClass) { fixed (Vector128<Int64>* pFld1 = &_fld1) fixed (Vector128<Int64>* pFld2 = &_fld2) { var result = AdvSimd.ShiftLogical( AdvSimd.LoadVector128((Int64*)(pFld1)), AdvSimd.LoadVector128((Int64*)(pFld2)) ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Int64>>() / sizeof(Int64); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<Int64>>() / sizeof(Int64); private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Int64>>() / sizeof(Int64); private static Int64[] _data1 = new Int64[Op1ElementCount]; private static Int64[] _data2 = new Int64[Op2ElementCount]; private static Vector128<Int64> _clsVar1; private static Vector128<Int64> _clsVar2; private Vector128<Int64> _fld1; private Vector128<Int64> _fld2; private DataTable _dataTable; static SimpleBinaryOpTest__ShiftLogical_Vector128_Int64() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref 
Unsafe.As<Vector128<Int64>, byte>(ref _clsVar1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _clsVar2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); } public SimpleBinaryOpTest__ShiftLogical_Vector128_Int64() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _fld2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); } _dataTable = new DataTable(_data1, _data2, new Int64[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.ShiftLogical( Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<Int64>>(_dataTable.inArray2Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.ShiftLogical( AdvSimd.LoadVector128((Int64*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector128((Int64*)(_dataTable.inArray2Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ShiftLogical), new Type[] { typeof(Vector128<Int64>), typeof(Vector128<Int64>) }) .Invoke(null, new object[] { Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<Int64>>(_dataTable.inArray2Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int64>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ShiftLogical), new Type[] { typeof(Vector128<Int64>), typeof(Vector128<Int64>) }) .Invoke(null, new object[] { AdvSimd.LoadVector128((Int64*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector128((Int64*)(_dataTable.inArray2Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int64>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.ShiftLogical( _clsVar1, _clsVar2 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void 
RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector128<Int64>* pClsVar1 = &_clsVar1) fixed (Vector128<Int64>* pClsVar2 = &_clsVar2) { var result = AdvSimd.ShiftLogical( AdvSimd.LoadVector128((Int64*)(pClsVar1)), AdvSimd.LoadVector128((Int64*)(pClsVar2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector128<Int64>>(_dataTable.inArray2Ptr); var result = AdvSimd.ShiftLogical(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector128((Int64*)(_dataTable.inArray1Ptr)); var op2 = AdvSimd.LoadVector128((Int64*)(_dataTable.inArray2Ptr)); var result = AdvSimd.ShiftLogical(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleBinaryOpTest__ShiftLogical_Vector128_Int64(); var result = AdvSimd.ShiftLogical(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new SimpleBinaryOpTest__ShiftLogical_Vector128_Int64(); fixed (Vector128<Int64>* pFld1 = &test._fld1) fixed (Vector128<Int64>* pFld2 = &test._fld2) { var result = AdvSimd.ShiftLogical( AdvSimd.LoadVector128((Int64*)(pFld1)), AdvSimd.LoadVector128((Int64*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.ShiftLogical(_fld1, _fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector128<Int64>* pFld1 = &_fld1) fixed (Vector128<Int64>* pFld2 = &_fld2) { var result = AdvSimd.ShiftLogical( AdvSimd.LoadVector128((Int64*)(pFld1)), AdvSimd.LoadVector128((Int64*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.ShiftLogical(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.ShiftLogical( AdvSimd.LoadVector128((Int64*)(&test._fld1)), AdvSimd.LoadVector128((Int64*)(&test._fld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector128<Int64> op1, Vector128<Int64> op2, void* result, [CallerMemberName] string method = "") { Int64[] inArray1 = new Int64[Op1ElementCount]; Int64[] inArray2 = new Int64[Op2ElementCount]; Int64[] outArray = new Int64[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray2[0]), op2); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int64>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "") { Int64[] inArray1 = new Int64[Op1ElementCount]; Int64[] inArray2 = new Int64[Op2ElementCount]; Int64[] outArray = new Int64[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<Int64>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<Int64>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int64>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(Int64[] left, Int64[] right, Int64[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.ShiftLogical(left[i], right[i]) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.ShiftLogical)}<Int64>(Vector128<Int64>, Vector128<Int64>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void ShiftLogical_Vector128_Int64() { var test = new SimpleBinaryOpTest__ShiftLogical_Vector128_Int64(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleBinaryOpTest__ShiftLogical_Vector128_Int64 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(Int64[] inArray1, Int64[] inArray2, Int64[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int64>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int64>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int64>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < 
sizeOfinArray2 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int64, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int64, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<Int64> _fld1; public Vector128<Int64> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref testStruct._fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref testStruct._fld2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); return testStruct; } public void RunStructFldScenario(SimpleBinaryOpTest__ShiftLogical_Vector128_Int64 testClass) { var result = AdvSimd.ShiftLogical(_fld1, _fld2); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(SimpleBinaryOpTest__ShiftLogical_Vector128_Int64 testClass) { fixed (Vector128<Int64>* pFld1 = &_fld1) fixed (Vector128<Int64>* pFld2 = &_fld2) { var result = AdvSimd.ShiftLogical( AdvSimd.LoadVector128((Int64*)(pFld1)), AdvSimd.LoadVector128((Int64*)(pFld2)) ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Int64>>() / sizeof(Int64); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<Int64>>() / sizeof(Int64); private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Int64>>() / sizeof(Int64); private static Int64[] _data1 = new Int64[Op1ElementCount]; private static Int64[] _data2 = new Int64[Op2ElementCount]; private static Vector128<Int64> _clsVar1; private static Vector128<Int64> _clsVar2; private Vector128<Int64> _fld1; private Vector128<Int64> _fld2; private DataTable _dataTable; static SimpleBinaryOpTest__ShiftLogical_Vector128_Int64() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref 
Unsafe.As<Vector128<Int64>, byte>(ref _clsVar1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _clsVar2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); } public SimpleBinaryOpTest__ShiftLogical_Vector128_Int64() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _fld2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); } _dataTable = new DataTable(_data1, _data2, new Int64[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.ShiftLogical( Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<Int64>>(_dataTable.inArray2Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.ShiftLogical( AdvSimd.LoadVector128((Int64*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector128((Int64*)(_dataTable.inArray2Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ShiftLogical), new Type[] { typeof(Vector128<Int64>), typeof(Vector128<Int64>) }) .Invoke(null, new object[] { Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<Int64>>(_dataTable.inArray2Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int64>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ShiftLogical), new Type[] { typeof(Vector128<Int64>), typeof(Vector128<Int64>) }) .Invoke(null, new object[] { AdvSimd.LoadVector128((Int64*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector128((Int64*)(_dataTable.inArray2Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int64>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.ShiftLogical( _clsVar1, _clsVar2 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void 
RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector128<Int64>* pClsVar1 = &_clsVar1) fixed (Vector128<Int64>* pClsVar2 = &_clsVar2) { var result = AdvSimd.ShiftLogical( AdvSimd.LoadVector128((Int64*)(pClsVar1)), AdvSimd.LoadVector128((Int64*)(pClsVar2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector128<Int64>>(_dataTable.inArray2Ptr); var result = AdvSimd.ShiftLogical(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector128((Int64*)(_dataTable.inArray1Ptr)); var op2 = AdvSimd.LoadVector128((Int64*)(_dataTable.inArray2Ptr)); var result = AdvSimd.ShiftLogical(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleBinaryOpTest__ShiftLogical_Vector128_Int64(); var result = AdvSimd.ShiftLogical(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new SimpleBinaryOpTest__ShiftLogical_Vector128_Int64(); fixed (Vector128<Int64>* pFld1 = &test._fld1) fixed (Vector128<Int64>* pFld2 = &test._fld2) { var result = AdvSimd.ShiftLogical( AdvSimd.LoadVector128((Int64*)(pFld1)), AdvSimd.LoadVector128((Int64*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.ShiftLogical(_fld1, _fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector128<Int64>* pFld1 = &_fld1) fixed (Vector128<Int64>* pFld2 = &_fld2) { var result = AdvSimd.ShiftLogical( AdvSimd.LoadVector128((Int64*)(pFld1)), AdvSimd.LoadVector128((Int64*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.ShiftLogical(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.ShiftLogical( AdvSimd.LoadVector128((Int64*)(&test._fld1)), AdvSimd.LoadVector128((Int64*)(&test._fld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector128<Int64> op1, Vector128<Int64> op2, void* result, [CallerMemberName] string method = "") { Int64[] inArray1 = new Int64[Op1ElementCount]; Int64[] inArray2 = new Int64[Op2ElementCount]; Int64[] outArray = new Int64[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray2[0]), op2); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int64>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "") { Int64[] inArray1 = new Int64[Op1ElementCount]; Int64[] inArray2 = new Int64[Op2ElementCount]; Int64[] outArray = new Int64[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<Int64>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<Int64>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int64>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(Int64[] left, Int64[] right, Int64[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.ShiftLogical(left[i], right[i]) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.ShiftLogical)}<Int64>(Vector128<Int64>, Vector128<Int64>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
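The PR metadata above describes treating the `armv8l` machine string (a 64-bit ARM core running a 32-bit userspace) the same way as `armv7l`. The actual change lives in the repository's build/architecture-detection scripts; the following C# helper is purely an illustrative sketch of that mapping, and the function name `MapUnameMachineToArch` and the non-ARM entries are assumptions added for the example:

```csharp
// Hypothetical illustration of the arch mapping described in the PR metadata:
// `armv8l` is routed to the same 32-bit `arm` target as `armv7l`.
// Not the runtime's real detection code.
using System;

public static class ArchDetectionSketch
{
    public static string MapUnameMachineToArch(string unameM) => unameM switch
    {
        "armv7l" or "armv8l" => "arm",   // armv8l now handled the same way as armv7l
        "aarch64" or "arm64" => "arm64", // assumed entry for 64-bit ARM
        "x86_64"             => "x64",   // assumed entry for x64
        _                    => unameM,  // pass through anything unrecognized
    };

    public static void Main()
    {
        foreach (string m in new[] { "armv7l", "armv8l", "aarch64", "x86_64" })
        {
            Console.WriteLine($"{m} -> {MapUnameMachineToArch(m)}");
        }
    }
}
```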
dotnet/runtime
66,477
Add armv8l virtualized 32-bit ARM core detection
## Expected behavior Runtime should be able to build within 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse as `arm` when the output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812) Arm support on source-build at https://github.com/dotnet/installer/pull/13378 Made as part of the Alpine Linux dotnet6 packaging project, see dotnet/source-build#2782
ayakael
2022-03-10T21:05:11Z
2022-03-11T09:29:12Z
f82fe1d83f1a0e22025e186fdc6f4d86de0a83a1
12816261feee53cc57fab17f088ee6544ef6ffdb
Add armv8l virtualized 32-bit ARM core detection. ## Expected behavior Runtime should be able to build within 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse as `arm` when the output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812) Arm support on source-build at https://github.com/dotnet/installer/pull/13378 Made as part of the Alpine Linux dotnet6 packaging project, see dotnet/source-build#2782
./src/libraries/System.Net.Http/tests/FunctionalTests/TelemetryTest.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Concurrent; using System.Collections.Generic; using System.Diagnostics.Tracing; using System.IO; using System.Linq; using System.Net.Quic; using System.Net.Quic.Implementations; using System.Net.Test.Common; using System.Text; using System.Threading; using System.Threading.Tasks; using Microsoft.DotNet.RemoteExecutor; using Xunit; using Xunit.Abstractions; namespace System.Net.Http.Functional.Tests { public abstract class TelemetryTest : HttpClientHandlerTestBase { public TelemetryTest(ITestOutputHelper output) : base(output) { } private string QuicImplementationProvider => UseQuicImplementationProvider?.GetType().Name ?? string.Empty; private static QuicImplementationProvider GetQuicImplementationProvider(string provider) => provider.Contains(nameof(QuicImplementationProviders.MsQuic)) ? QuicImplementationProviders.MsQuic : provider.Contains(nameof(QuicImplementationProviders.Mock)) ? QuicImplementationProviders.Mock : null; [Fact] public static void EventSource_ExistsWithCorrectId() { Type esType = typeof(HttpClient).Assembly.GetType("System.Net.Http.HttpTelemetry", throwOnError: true, ignoreCase: false); Assert.NotNull(esType); Assert.Equal("System.Net.Http", EventSource.GetName(esType)); Assert.Equal(Guid.Parse("d30b5633-7ef1-5485-b4e0-94979b102068"), EventSource.GetGuid(esType)); Assert.NotEmpty(EventSource.GenerateManifest(esType, esType.Assembly.Location)); } public static IEnumerable<object[]> TestMethods_MemberData() { yield return new object[] { "GetAsync" }; yield return new object[] { "SendAsync" }; yield return new object[] { "UnbufferedSendAsync" }; yield return new object[] { "GetStringAsync" }; yield return new object[] { "GetByteArrayAsync" }; yield return new object[] { "GetStreamAsync" }; yield return new object[] { "InvokerSendAsync" }; yield return new object[] { "Send" }; yield return new object[] { "UnbufferedSend" }; yield return new object[] { "InvokerSend" }; } [OuterLoop] [ConditionalTheory(typeof(RemoteExecutor), nameof(RemoteExecutor.IsSupported))] [MemberData(nameof(TestMethods_MemberData))] public void EventSource_SuccessfulRequest_LogsStartStop(string testMethod) { if (UseVersion.Major != 1 && !testMethod.EndsWith("Async")) { // Synchronous requests are only supported for HTTP/1.1 return; } RemoteExecutor.Invoke(static async (useVersionString, quicProvider, testMethod) => { const int ResponseContentLength = 42; Version version = Version.Parse(useVersionString); using var listener = new TestEventListener("System.Net.Http", EventLevel.Verbose, eventCounterInterval: 0.1d); listener.AddActivityTracking(); bool buffersResponse = false; var events = new ConcurrentQueue<(EventWrittenEventArgs Event, Guid ActivityId)>(); await listener.RunWithCallbackAsync(e => events.Enqueue((e, e.ActivityId)), async () => { await GetFactoryForVersion(version, GetQuicImplementationProvider(quicProvider)).CreateClientAndServerAsync( async uri => { using HttpClientHandler handler = CreateHttpClientHandler(version, GetQuicImplementationProvider(quicProvider)); using HttpClient client = CreateHttpClient(handler, useVersionString); using var invoker = new HttpMessageInvoker(handler); var request = new HttpRequestMessage(HttpMethod.Get, uri) { Version = version }; switch (testMethod) { case "GetAsync": { buffersResponse = true; await client.GetAsync(uri); } break; case "Send": { buffersResponse = true; await Task.Run(() => 
client.Send(request)); } break; case "UnbufferedSend": { buffersResponse = false; using HttpResponseMessage response = await Task.Run(() => client.Send(request, HttpCompletionOption.ResponseHeadersRead)); response.Content.CopyTo(Stream.Null, null, default); } break; case "SendAsync": { buffersResponse = true; await client.SendAsync(request); } break; case "UnbufferedSendAsync": { buffersResponse = false; using HttpResponseMessage response = await client.SendAsync(request, HttpCompletionOption.ResponseHeadersRead); await response.Content.CopyToAsync(Stream.Null); } break; case "GetStringAsync": { buffersResponse = true; await client.GetStringAsync(uri); } break; case "GetByteArrayAsync": { buffersResponse = true; await client.GetByteArrayAsync(uri); } break; case "GetStreamAsync": { buffersResponse = false; using Stream responseStream = await client.GetStreamAsync(uri); await responseStream.CopyToAsync(Stream.Null); } break; case "InvokerSend": { buffersResponse = false; using HttpResponseMessage response = await Task.Run(() => invoker.Send(request, cancellationToken: default)); await response.Content.CopyToAsync(Stream.Null); } break; case "InvokerSendAsync": { buffersResponse = false; using HttpResponseMessage response = await invoker.SendAsync(request, cancellationToken: default); await response.Content.CopyToAsync(Stream.Null); } break; } }, async server => { await server.AcceptConnectionAsync(async connection => { await connection.ReadRequestDataAsync(); await WaitForEventCountersAsync(events); await connection.SendResponseAsync(content: new string('a', ResponseContentLength)); }); }); await WaitForEventCountersAsync(events); }); Assert.DoesNotContain(events, e => e.Event.EventId == 0); // errors from the EventSource itself ValidateStartFailedStopEvents(events, version); ValidateConnectionEstablishedClosed(events, version); ValidateRequestResponseStartStopEvents( events, requestContentLength: null, responseContentLength: buffersResponse ? 
ResponseContentLength : null, count: 1); ValidateEventCounters(events, requestCount: 1, shouldHaveFailures: false, versionMajor: version.Major); }, UseVersion.ToString(), QuicImplementationProvider, testMethod).Dispose(); } [OuterLoop] [ConditionalTheory(typeof(RemoteExecutor), nameof(RemoteExecutor.IsSupported))] [MemberData(nameof(TestMethods_MemberData))] public void EventSource_UnsuccessfulRequest_LogsStartFailedStop(string testMethod) { if (UseVersion.Major != 1 && !testMethod.EndsWith("Async")) { // Synchronous requests are only supported for HTTP/1.1 return; } RemoteExecutor.Invoke(static async (useVersionString, quicProvider, testMethod) => { Version version = Version.Parse(useVersionString); using var listener = new TestEventListener("System.Net.Http", EventLevel.Verbose, eventCounterInterval: 0.1d); listener.AddActivityTracking(); var events = new ConcurrentQueue<(EventWrittenEventArgs Event, Guid ActivityId)>(); await listener.RunWithCallbackAsync(e => events.Enqueue((e, e.ActivityId)), async () => { var semaphore = new SemaphoreSlim(0, 1); var cts = new CancellationTokenSource(); await GetFactoryForVersion(version, GetQuicImplementationProvider(quicProvider)).CreateClientAndServerAsync( async uri => { using HttpClientHandler handler = CreateHttpClientHandler(version, GetQuicImplementationProvider(quicProvider)); using HttpClient client = CreateHttpClient(handler, useVersionString); using var invoker = new HttpMessageInvoker(handler); var request = new HttpRequestMessage(HttpMethod.Get, uri) { Version = version }; switch (testMethod) { case "GetAsync": await Assert.ThrowsAsync<TaskCanceledException>(async () => await client.GetAsync(uri, cts.Token)); break; case "Send": await Assert.ThrowsAsync<TaskCanceledException>(async () => await Task.Run(() => client.Send(request, cts.Token))); break; case "UnbufferedSend": await Assert.ThrowsAsync<TaskCanceledException>(async () => await Task.Run(() => client.Send(request, HttpCompletionOption.ResponseHeadersRead, cts.Token))); break; case "SendAsync": await Assert.ThrowsAsync<TaskCanceledException>(async () => await client.SendAsync(request, cts.Token)); break; case "UnbufferedSendAsync": await Assert.ThrowsAsync<TaskCanceledException>(async () => await client.SendAsync(request, HttpCompletionOption.ResponseHeadersRead, cts.Token)); break; case "GetStringAsync": await Assert.ThrowsAsync<TaskCanceledException>(async () => await client.GetStringAsync(uri, cts.Token)); break; case "GetByteArrayAsync": await Assert.ThrowsAsync<TaskCanceledException>(async () => await client.GetByteArrayAsync(uri, cts.Token)); break; case "GetStreamAsync": await Assert.ThrowsAsync<TaskCanceledException>(async () => await client.GetStreamAsync(uri, cts.Token)); break; case "InvokerSend": await Assert.ThrowsAsync<TaskCanceledException>(async () => await Task.Run(() => invoker.Send(request, cts.Token))); break; case "InvokerSendAsync": await Assert.ThrowsAsync<TaskCanceledException>(async () => await invoker.SendAsync(request, cts.Token)); break; } semaphore.Release(); }, async server => { await server.AcceptConnectionAsync(async connection => { await connection.ReadRequestDataAsync(); await WaitForEventCountersAsync(events); cts.Cancel(); Assert.True(await semaphore.WaitAsync(TimeSpan.FromSeconds(30))); }); }); await WaitForEventCountersAsync(events); }); Assert.DoesNotContain(events, e => e.Event.EventId == 0); // errors from the EventSource itself ValidateStartFailedStopEvents(events, version, shouldHaveFailures: true); 
ValidateConnectionEstablishedClosed(events, version); ValidateEventCounters(events, requestCount: 1, shouldHaveFailures: true, versionMajor: version.Major); }, UseVersion.ToString(), QuicImplementationProvider, testMethod).Dispose(); } [OuterLoop] [ConditionalTheory(typeof(RemoteExecutor), nameof(RemoteExecutor.IsSupported))] [InlineData("PostAsync")] [InlineData("Send")] [InlineData("SendAsync")] [InlineData("SendChunkedAsync")] [InlineData("InvokerSend")] [InlineData("InvokerSendAsync")] public void EventSource_SendingRequestContent_LogsRequestContentStartStop(string testMethod) { if (UseVersion.Major != 1 && !testMethod.EndsWith("Async")) { // Synchronous requests are only supported for HTTP/1.1 return; } RemoteExecutor.Invoke(static async (useVersionString, quicProvider, testMethod) => { const int RequestContentLength = 42; const int ResponseContentLength = 43; Version version = Version.Parse(useVersionString); using var listener = new TestEventListener("System.Net.Http", EventLevel.Verbose, eventCounterInterval: 0.1d); listener.AddActivityTracking(); var events = new ConcurrentQueue<(EventWrittenEventArgs Event, Guid ActivityId)>(); await listener.RunWithCallbackAsync(e => events.Enqueue((e, e.ActivityId)), async () => { await GetFactoryForVersion(version, GetQuicImplementationProvider(quicProvider)).CreateClientAndServerAsync( async uri => { using HttpClientHandler handler = CreateHttpClientHandler(version, GetQuicImplementationProvider(quicProvider)); using HttpClient client = CreateHttpClient(handler, useVersionString); using var invoker = new HttpMessageInvoker(handler); var request = new HttpRequestMessage(HttpMethod.Get, uri) { Version = version }; var content = new ByteArrayContent(Encoding.ASCII.GetBytes(new string('a', RequestContentLength))); request.Content = content; switch (testMethod) { case "PostAsync": await client.PostAsync(uri, content); break; case "Send": await Task.Run(() => client.Send(request)); break; case "SendAsync": await client.SendAsync(request); break; case "SendChunkedAsync": request.Headers.TransferEncodingChunked = true; await client.SendAsync(request); break; case "InvokerSend": HttpResponseMessage syncResponse = await Task.Run(() => invoker.Send(request, cancellationToken: default)); await syncResponse.Content.CopyToAsync(Stream.Null); break; case "InvokerSendAsync": HttpResponseMessage asyncResponse = await invoker.SendAsync(request, cancellationToken: default); await asyncResponse.Content.CopyToAsync(Stream.Null); break; } }, async server => { await server.AcceptConnectionAsync(async connection => { await connection.ReadRequestDataAsync(); await WaitForEventCountersAsync(events); await connection.SendResponseAsync(content: new string('a', ResponseContentLength)); }); }); await WaitForEventCountersAsync(events); }); Assert.DoesNotContain(events, e => e.Event.EventId == 0); // errors from the EventSource itself ValidateStartFailedStopEvents(events, version); ValidateConnectionEstablishedClosed(events, version); ValidateRequestResponseStartStopEvents( events, RequestContentLength, responseContentLength: testMethod.StartsWith("InvokerSend") ? 
null : ResponseContentLength, count: 1); ValidateEventCounters(events, requestCount: 1, shouldHaveFailures: false, versionMajor: version.Major); }, UseVersion.ToString(), QuicImplementationProvider, testMethod).Dispose(); } private static void ValidateStartFailedStopEvents(ConcurrentQueue<(EventWrittenEventArgs Event, Guid ActivityId)> events, Version version, bool shouldHaveFailures = false, int count = 1) { (EventWrittenEventArgs Event, Guid ActivityId)[] starts = events.Where(e => e.Event.EventName == "RequestStart").ToArray(); foreach (EventWrittenEventArgs startEvent in starts.Select(e => e.Event)) { Assert.Equal(7, startEvent.Payload.Count); Assert.StartsWith("http", (string)startEvent.Payload[0]); Assert.NotEmpty((string)startEvent.Payload[1]); // host Assert.True(startEvent.Payload[2] is int port && port >= 0 && port <= 65535); Assert.NotEmpty((string)startEvent.Payload[3]); // pathAndQuery byte versionMajor = Assert.IsType<byte>(startEvent.Payload[4]); Assert.Equal(version.Major, versionMajor); byte versionMinor = Assert.IsType<byte>(startEvent.Payload[5]); Assert.Equal(version.Minor, versionMinor); Assert.InRange((HttpVersionPolicy)startEvent.Payload[6], HttpVersionPolicy.RequestVersionOrLower, HttpVersionPolicy.RequestVersionExact); } Assert.Equal(count, starts.Length); (EventWrittenEventArgs Event, Guid ActivityId)[] stops = events.Where(e => e.Event.EventName == "RequestStop").ToArray(); Assert.All(stops, stopEvent => Assert.Empty(stopEvent.Event.Payload)); ValidateSameActivityIds(starts, stops); (EventWrittenEventArgs Event, Guid ActivityId)[] failures = events.Where(e => e.Event.EventName == "RequestFailed").ToArray(); Assert.All(failures, failedEvent => Assert.Empty(failedEvent.Event.Payload)); if (shouldHaveFailures) { ValidateSameActivityIds(starts, failures); } else { Assert.Empty(failures); } } private static void ValidateConnectionEstablishedClosed(ConcurrentQueue<(EventWrittenEventArgs Event, Guid ActivityId)> events, Version version, int count = 1) { EventWrittenEventArgs[] connectionsEstablished = events.Select(e => e.Event).Where(e => e.EventName == "ConnectionEstablished").ToArray(); Assert.Equal(count, connectionsEstablished.Length); foreach (EventWrittenEventArgs connectionEstablished in connectionsEstablished) { Assert.Equal(2, connectionEstablished.Payload.Count); Assert.Equal(version.Major, (byte)connectionEstablished.Payload[0]); Assert.Equal(version.Minor, (byte)connectionEstablished.Payload[1]); } EventWrittenEventArgs[] connectionsClosed = events.Select(e => e.Event).Where(e => e.EventName == "ConnectionClosed").ToArray(); Assert.Equal(count, connectionsClosed.Length); foreach (EventWrittenEventArgs connectionClosed in connectionsClosed) { Assert.Equal(2, connectionClosed.Payload.Count); Assert.Equal(version.Major, (byte)connectionClosed.Payload[0]); Assert.Equal(version.Minor, (byte)connectionClosed.Payload[1]); } } private static void ValidateRequestResponseStartStopEvents(ConcurrentQueue<(EventWrittenEventArgs Event, Guid ActivityId)> events, int? requestContentLength, int? 
responseContentLength, int count) { (EventWrittenEventArgs Event, Guid ActivityId)[] requestHeadersStarts = events.Where(e => e.Event.EventName == "RequestHeadersStart").ToArray(); Assert.Equal(count, requestHeadersStarts.Length); Assert.All(requestHeadersStarts, r => Assert.Empty(r.Event.Payload)); (EventWrittenEventArgs Event, Guid ActivityId)[] requestHeadersStops = events.Where(e => e.Event.EventName == "RequestHeadersStop").ToArray(); Assert.Equal(count, requestHeadersStops.Length); Assert.All(requestHeadersStops, r => Assert.Empty(r.Event.Payload)); ValidateSameActivityIds(requestHeadersStarts, requestHeadersStops); (EventWrittenEventArgs Event, Guid ActivityId)[] requestContentStarts = events.Where(e => e.Event.EventName == "RequestContentStart").ToArray(); Assert.Equal(requestContentLength.HasValue ? count : 0, requestContentStarts.Length); Assert.All(requestContentStarts, r => Assert.Empty(r.Event.Payload)); (EventWrittenEventArgs Event, Guid ActivityId)[] requestContentStops = events.Where(e => e.Event.EventName == "RequestContentStop").ToArray(); Assert.Equal(requestContentLength.HasValue ? count : 0, requestContentStops.Length); foreach (EventWrittenEventArgs requestContentStop in requestContentStops.Select(e => e.Event)) { object payload = Assert.Single(requestContentStop.Payload); Assert.True(payload is long); Assert.Equal(requestContentLength.Value, (long)payload); } ValidateSameActivityIds(requestContentStarts, requestContentStops); (EventWrittenEventArgs Event, Guid ActivityId)[] responseHeadersStarts = events.Where(e => e.Event.EventName == "ResponseHeadersStart").ToArray(); Assert.Equal(count, responseHeadersStarts.Length); Assert.All(responseHeadersStarts, r => Assert.Empty(r.Event.Payload)); (EventWrittenEventArgs Event, Guid ActivityId)[] responseHeadersStops = events.Where(e => e.Event.EventName == "ResponseHeadersStop").ToArray(); Assert.Equal(count, responseHeadersStops.Length); Assert.All(responseHeadersStops, r => Assert.Empty(r.Event.Payload)); ValidateSameActivityIds(responseHeadersStarts, responseHeadersStops); (EventWrittenEventArgs Event, Guid ActivityId)[] responseContentStarts = events.Where(e => e.Event.EventName == "ResponseContentStart").ToArray(); Assert.Equal(responseContentLength.HasValue ? count : 0, responseContentStarts.Length); Assert.All(responseContentStarts, r => Assert.Empty(r.Event.Payload)); (EventWrittenEventArgs Event, Guid ActivityId)[] responseContentStops = events.Where(e => e.Event.EventName == "ResponseContentStop").ToArray(); Assert.Equal(responseContentLength.HasValue ? count : 0, responseContentStops.Length); Assert.All(responseContentStops, r => Assert.Empty(r.Event.Payload)); ValidateSameActivityIds(responseContentStarts, responseContentStops); } private static void ValidateSameActivityIds((EventWrittenEventArgs Event, Guid ActivityId)[] a, (EventWrittenEventArgs Event, Guid ActivityId)[] b) { Assert.Equal(a.Length, b.Length); for (int i = 0; i < a.Length; i++) { Assert.NotEqual(Guid.Empty, a[i].ActivityId); Assert.Equal(a[i].ActivityId, b[i].ActivityId); } } private static void ValidateEventCounters(ConcurrentQueue<(EventWrittenEventArgs Event, Guid ActivityId)> events, int requestCount, bool shouldHaveFailures, int versionMajor, bool requestLeftQueue = false) { Dictionary<string, double[]> eventCounters = events .Select(e => e.Event) .Where(e => e.EventName == "EventCounters") .Select(e => (IDictionary<string, object>)e.Payload.Single()) .GroupBy(d => (string)d["Name"], d => (double)(d.ContainsKey("Mean") ? 
d["Mean"] : d["Increment"])) .ToDictionary(p => p.Key, p => p.ToArray()); Assert.True(eventCounters.TryGetValue("requests-started", out double[] requestsStarted)); Assert.Equal(requestCount, requestsStarted[^1]); Assert.True(eventCounters.TryGetValue("requests-started-rate", out double[] requestRate)); Assert.Contains(requestRate, r => r > 0); Assert.True(eventCounters.TryGetValue("requests-failed", out double[] requestsFailures)); Assert.True(eventCounters.TryGetValue("requests-failed-rate", out double[] requestsFailureRate)); if (shouldHaveFailures) { Assert.Equal(1, requestsFailures[^1]); Assert.Contains(requestsFailureRate, r => r > 0); } else { Assert.All(requestsFailures, a => Assert.Equal(0, a)); Assert.All(requestsFailureRate, r => Assert.Equal(0, r)); } Assert.True(eventCounters.TryGetValue("current-requests", out double[] currentRequests)); Assert.Contains(currentRequests, c => c > 0); Assert.Equal(0, currentRequests[^1]); Assert.True(eventCounters.TryGetValue("http11-connections-current-total", out double[] http11ConnectionsTotal)); Assert.All(http11ConnectionsTotal, c => Assert.True(c >= 0)); Assert.Equal(0, http11ConnectionsTotal[^1]); Assert.True(eventCounters.TryGetValue("http20-connections-current-total", out double[] http20ConnectionsTotal)); Assert.All(http20ConnectionsTotal, c => Assert.True(c >= 0)); Assert.Equal(0, http20ConnectionsTotal[^1]); Assert.True(eventCounters.TryGetValue("http30-connections-current-total", out double[] http30ConnectionsTotal)); Assert.All(http30ConnectionsTotal, c => Assert.True(c >= 0)); Assert.Equal(0, http30ConnectionsTotal[^1]); if (versionMajor == 1) { Assert.Contains(http11ConnectionsTotal, d => d > 0); Assert.DoesNotContain(http20ConnectionsTotal, d => d > 0); Assert.DoesNotContain(http30ConnectionsTotal, d => d > 0); } else if (versionMajor == 2) { Assert.DoesNotContain(http11ConnectionsTotal, d => d > 0); Assert.Contains(http20ConnectionsTotal, d => d > 0); Assert.DoesNotContain(http30ConnectionsTotal, d => d > 0); } else { Assert.DoesNotContain(http11ConnectionsTotal, d => d > 0); Assert.DoesNotContain(http20ConnectionsTotal, d => d > 0); Assert.Contains(http30ConnectionsTotal, d => d > 0); } Assert.True(eventCounters.TryGetValue("http11-requests-queue-duration", out double[] http11requestQueueDurations)); Assert.All(http11requestQueueDurations, d => Assert.True(d >= 0)); Assert.Equal(0, http11requestQueueDurations[^1]); Assert.True(eventCounters.TryGetValue("http20-requests-queue-duration", out double[] http20requestQueueDurations)); Assert.All(http20requestQueueDurations, d => Assert.True(d >= 0)); Assert.Equal(0, http20requestQueueDurations[^1]); Assert.True(eventCounters.TryGetValue("http30-requests-queue-duration", out double[] http30requestQueueDurations)); Assert.All(http30requestQueueDurations, d => Assert.True(d >= 0)); Assert.Equal(0, http30requestQueueDurations[^1]); if (requestLeftQueue) { if (versionMajor == 1) { Assert.Contains(http11requestQueueDurations, d => d > 0); Assert.DoesNotContain(http20requestQueueDurations, d => d > 0); Assert.DoesNotContain(http30requestQueueDurations, d => d > 0); } else if (versionMajor == 2) { Assert.DoesNotContain(http11requestQueueDurations, d => d > 0); Assert.Contains(http20requestQueueDurations, d => d > 0); Assert.DoesNotContain(http30requestQueueDurations, d => d > 0); } else { Assert.DoesNotContain(http11requestQueueDurations, d => d > 0); Assert.DoesNotContain(http20requestQueueDurations, d => d > 0); Assert.Contains(http30requestQueueDurations, d => d > 0); } } } [OuterLoop] 
[ConditionalFact(typeof(RemoteExecutor), nameof(RemoteExecutor.IsSupported))] public void EventSource_ConnectionPoolAtMaxConnections_LogsRequestLeftQueue() { if (UseVersion.Major == 3 && UseQuicImplementationProvider == QuicImplementationProviders.Mock) { return; } RemoteExecutor.Invoke(static async (useVersionString, quicProvider) => { Version version = Version.Parse(useVersionString); using var listener = new TestEventListener("System.Net.Http", EventLevel.Verbose, eventCounterInterval: 0.1d); listener.AddActivityTracking(); var events = new ConcurrentQueue<(EventWrittenEventArgs Event, Guid ActivityId)>(); await listener.RunWithCallbackAsync(e => events.Enqueue((e, e.ActivityId)), async () => { var firstRequestReceived = new SemaphoreSlim(0, 1); var secondRequestSent = new SemaphoreSlim(0, 1); var firstRequestFinished = new SemaphoreSlim(0, 1); await GetFactoryForVersion(version, GetQuicImplementationProvider(quicProvider)).CreateClientAndServerAsync( async uri => { using HttpClientHandler handler = CreateHttpClientHandler(version, GetQuicImplementationProvider(quicProvider)); using HttpClient client = CreateHttpClient(handler, useVersionString); var socketsHttpHandler = GetUnderlyingSocketsHttpHandler(handler); socketsHttpHandler.MaxConnectionsPerServer = 1; socketsHttpHandler.SslOptions.RemoteCertificateValidationCallback = delegate { return true; }; // Dummy request to ensure that the MaxConcurrentStreams setting has been acknowledged await client.GetStringAsync(uri); Task firstRequest = client.GetStringAsync(uri); Assert.True(await firstRequestReceived.WaitAsync(TimeSpan.FromSeconds(10))); // We are now at the connection limit, the next request will wait for the first one to complete Task secondRequest = client.GetStringAsync(uri); secondRequestSent.Release(); // We are asserting that ActivityIds between Start/Stop pairs match below // We wait for the first request to finish to ensure that RequestStop events // are logged in the same order as RequestStarts await firstRequest; firstRequestFinished.Release(); await secondRequest; }, async server => { GenericLoopbackConnection connection; if (server is Http2LoopbackServer http2Server) { connection = await http2Server.EstablishConnectionAsync(new SettingsEntry { SettingId = SettingId.MaxConcurrentStreams, Value = 1 }); } else { connection = await server.EstablishGenericConnectionAsync(); } using (connection) { // Dummy request to ensure that the MaxConcurrentStreams setting has been acknowledged await connection.ReadRequestDataAsync(readBody: false); await connection.SendResponseAsync(); // First request await connection.ReadRequestDataAsync(readBody: false); firstRequestReceived.Release(); Assert.True(await secondRequestSent.WaitAsync(TimeSpan.FromSeconds(10))); await WaitForEventCountersAsync(events); await connection.SendResponseAsync(); // Second request Assert.True(await firstRequestFinished.WaitAsync(TimeSpan.FromSeconds(10))); await connection.ReadRequestDataAsync(readBody: false); await connection.SendResponseAsync(); }; }, options: new Http3Options { MaxBidirectionalStreams = 1 }); await WaitForEventCountersAsync(events); }); Assert.DoesNotContain(events, e => e.Event.EventId == 0); // errors from the EventSource itself ValidateStartFailedStopEvents(events, version, count: 3); ValidateConnectionEstablishedClosed(events, version); var requestLeftQueueEvents = events.Where(e => e.Event.EventName == "RequestLeftQueue"); Assert.InRange(requestLeftQueueEvents.Count(), 2, version.Major == 3 ? 
3 : 2); foreach (var (e, _) in requestLeftQueueEvents) { Assert.Equal(3, e.Payload.Count); Assert.True((double)e.Payload[0] > 0); // timeSpentOnQueue Assert.Equal(version.Major, (byte)e.Payload[1]); Assert.Equal(version.Minor, (byte)e.Payload[2]); } Guid requestLeftQueueId = requestLeftQueueEvents.Last().ActivityId; Assert.Equal(requestLeftQueueId, events.Where(e => e.Event.EventName == "RequestStart").Last().ActivityId); ValidateRequestResponseStartStopEvents(events, requestContentLength: null, responseContentLength: 0, count: 3); ValidateEventCounters(events, requestCount: 3, shouldHaveFailures: false, versionMajor: version.Major, requestLeftQueue: true); }, UseVersion.ToString(), QuicImplementationProvider).Dispose(); } private static async Task WaitForEventCountersAsync(ConcurrentQueue<(EventWrittenEventArgs Event, Guid ActivityId)> events) { DateTime startTime = DateTime.UtcNow; int startCount = events.Count; while (events.Skip(startCount).Count(e => IsRequestsStartedEventCounter(e.Event)) < 3) { if (DateTime.UtcNow.Subtract(startTime) > TimeSpan.FromSeconds(30)) throw new TimeoutException($"Timed out waiting for EventCounters"); await Task.Delay(100); } static bool IsRequestsStartedEventCounter(EventWrittenEventArgs e) { if (e.EventName != "EventCounters") return false; var dictionary = (IDictionary<string, object>)e.Payload.Single(); return (string)dictionary["Name"] == "requests-started"; } } } public sealed class TelemetryTest_Http11 : TelemetryTest { public TelemetryTest_Http11(ITestOutputHelper output) : base(output) { } } public sealed class TelemetryTest_Http20 : TelemetryTest { protected override Version UseVersion => HttpVersion.Version20; public TelemetryTest_Http20(ITestOutputHelper output) : base(output) { } } [ConditionalClass(typeof(HttpClientHandlerTestBase), nameof(IsMsQuicSupported))] public sealed class TelemetryTest_Http30_MsQuic : TelemetryTest { protected override Version UseVersion => HttpVersion.Version30; protected override QuicImplementationProvider UseQuicImplementationProvider => QuicImplementationProviders.MsQuic; public TelemetryTest_Http30_MsQuic(ITestOutputHelper output) : base(output) { } } [ConditionalClass(typeof(HttpClientHandlerTestBase), nameof(IsMockQuicSupported))] public sealed class TelemetryTest_Http30_Mock : TelemetryTest { protected override Version UseVersion => HttpVersion.Version30; protected override QuicImplementationProvider UseQuicImplementationProvider => QuicImplementationProviders.Mock; public TelemetryTest_Http30_Mock(ITestOutputHelper output) : base(output) { } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Concurrent; using System.Collections.Generic; using System.Diagnostics.Tracing; using System.IO; using System.Linq; using System.Net.Quic; using System.Net.Quic.Implementations; using System.Net.Test.Common; using System.Text; using System.Threading; using System.Threading.Tasks; using Microsoft.DotNet.RemoteExecutor; using Xunit; using Xunit.Abstractions; namespace System.Net.Http.Functional.Tests { public abstract class TelemetryTest : HttpClientHandlerTestBase { public TelemetryTest(ITestOutputHelper output) : base(output) { } private string QuicImplementationProvider => UseQuicImplementationProvider?.GetType().Name ?? string.Empty; private static QuicImplementationProvider GetQuicImplementationProvider(string provider) => provider.Contains(nameof(QuicImplementationProviders.MsQuic)) ? QuicImplementationProviders.MsQuic : provider.Contains(nameof(QuicImplementationProviders.Mock)) ? QuicImplementationProviders.Mock : null; [Fact] public static void EventSource_ExistsWithCorrectId() { Type esType = typeof(HttpClient).Assembly.GetType("System.Net.Http.HttpTelemetry", throwOnError: true, ignoreCase: false); Assert.NotNull(esType); Assert.Equal("System.Net.Http", EventSource.GetName(esType)); Assert.Equal(Guid.Parse("d30b5633-7ef1-5485-b4e0-94979b102068"), EventSource.GetGuid(esType)); Assert.NotEmpty(EventSource.GenerateManifest(esType, esType.Assembly.Location)); } public static IEnumerable<object[]> TestMethods_MemberData() { yield return new object[] { "GetAsync" }; yield return new object[] { "SendAsync" }; yield return new object[] { "UnbufferedSendAsync" }; yield return new object[] { "GetStringAsync" }; yield return new object[] { "GetByteArrayAsync" }; yield return new object[] { "GetStreamAsync" }; yield return new object[] { "InvokerSendAsync" }; yield return new object[] { "Send" }; yield return new object[] { "UnbufferedSend" }; yield return new object[] { "InvokerSend" }; } [OuterLoop] [ConditionalTheory(typeof(RemoteExecutor), nameof(RemoteExecutor.IsSupported))] [MemberData(nameof(TestMethods_MemberData))] public void EventSource_SuccessfulRequest_LogsStartStop(string testMethod) { if (UseVersion.Major != 1 && !testMethod.EndsWith("Async")) { // Synchronous requests are only supported for HTTP/1.1 return; } RemoteExecutor.Invoke(static async (useVersionString, quicProvider, testMethod) => { const int ResponseContentLength = 42; Version version = Version.Parse(useVersionString); using var listener = new TestEventListener("System.Net.Http", EventLevel.Verbose, eventCounterInterval: 0.1d); listener.AddActivityTracking(); bool buffersResponse = false; var events = new ConcurrentQueue<(EventWrittenEventArgs Event, Guid ActivityId)>(); await listener.RunWithCallbackAsync(e => events.Enqueue((e, e.ActivityId)), async () => { await GetFactoryForVersion(version, GetQuicImplementationProvider(quicProvider)).CreateClientAndServerAsync( async uri => { using HttpClientHandler handler = CreateHttpClientHandler(version, GetQuicImplementationProvider(quicProvider)); using HttpClient client = CreateHttpClient(handler, useVersionString); using var invoker = new HttpMessageInvoker(handler); var request = new HttpRequestMessage(HttpMethod.Get, uri) { Version = version }; switch (testMethod) { case "GetAsync": { buffersResponse = true; await client.GetAsync(uri); } break; case "Send": { buffersResponse = true; await Task.Run(() => 
client.Send(request)); } break; case "UnbufferedSend": { buffersResponse = false; using HttpResponseMessage response = await Task.Run(() => client.Send(request, HttpCompletionOption.ResponseHeadersRead)); response.Content.CopyTo(Stream.Null, null, default); } break; case "SendAsync": { buffersResponse = true; await client.SendAsync(request); } break; case "UnbufferedSendAsync": { buffersResponse = false; using HttpResponseMessage response = await client.SendAsync(request, HttpCompletionOption.ResponseHeadersRead); await response.Content.CopyToAsync(Stream.Null); } break; case "GetStringAsync": { buffersResponse = true; await client.GetStringAsync(uri); } break; case "GetByteArrayAsync": { buffersResponse = true; await client.GetByteArrayAsync(uri); } break; case "GetStreamAsync": { buffersResponse = false; using Stream responseStream = await client.GetStreamAsync(uri); await responseStream.CopyToAsync(Stream.Null); } break; case "InvokerSend": { buffersResponse = false; using HttpResponseMessage response = await Task.Run(() => invoker.Send(request, cancellationToken: default)); await response.Content.CopyToAsync(Stream.Null); } break; case "InvokerSendAsync": { buffersResponse = false; using HttpResponseMessage response = await invoker.SendAsync(request, cancellationToken: default); await response.Content.CopyToAsync(Stream.Null); } break; } }, async server => { await server.AcceptConnectionAsync(async connection => { await connection.ReadRequestDataAsync(); await WaitForEventCountersAsync(events); await connection.SendResponseAsync(content: new string('a', ResponseContentLength)); }); }); await WaitForEventCountersAsync(events); }); Assert.DoesNotContain(events, e => e.Event.EventId == 0); // errors from the EventSource itself ValidateStartFailedStopEvents(events, version); ValidateConnectionEstablishedClosed(events, version); ValidateRequestResponseStartStopEvents( events, requestContentLength: null, responseContentLength: buffersResponse ? 
ResponseContentLength : null, count: 1); ValidateEventCounters(events, requestCount: 1, shouldHaveFailures: false, versionMajor: version.Major); }, UseVersion.ToString(), QuicImplementationProvider, testMethod).Dispose(); } [OuterLoop] [ConditionalTheory(typeof(RemoteExecutor), nameof(RemoteExecutor.IsSupported))] [MemberData(nameof(TestMethods_MemberData))] public void EventSource_UnsuccessfulRequest_LogsStartFailedStop(string testMethod) { if (UseVersion.Major != 1 && !testMethod.EndsWith("Async")) { // Synchronous requests are only supported for HTTP/1.1 return; } RemoteExecutor.Invoke(static async (useVersionString, quicProvider, testMethod) => { Version version = Version.Parse(useVersionString); using var listener = new TestEventListener("System.Net.Http", EventLevel.Verbose, eventCounterInterval: 0.1d); listener.AddActivityTracking(); var events = new ConcurrentQueue<(EventWrittenEventArgs Event, Guid ActivityId)>(); await listener.RunWithCallbackAsync(e => events.Enqueue((e, e.ActivityId)), async () => { var semaphore = new SemaphoreSlim(0, 1); var cts = new CancellationTokenSource(); await GetFactoryForVersion(version, GetQuicImplementationProvider(quicProvider)).CreateClientAndServerAsync( async uri => { using HttpClientHandler handler = CreateHttpClientHandler(version, GetQuicImplementationProvider(quicProvider)); using HttpClient client = CreateHttpClient(handler, useVersionString); using var invoker = new HttpMessageInvoker(handler); var request = new HttpRequestMessage(HttpMethod.Get, uri) { Version = version }; switch (testMethod) { case "GetAsync": await Assert.ThrowsAsync<TaskCanceledException>(async () => await client.GetAsync(uri, cts.Token)); break; case "Send": await Assert.ThrowsAsync<TaskCanceledException>(async () => await Task.Run(() => client.Send(request, cts.Token))); break; case "UnbufferedSend": await Assert.ThrowsAsync<TaskCanceledException>(async () => await Task.Run(() => client.Send(request, HttpCompletionOption.ResponseHeadersRead, cts.Token))); break; case "SendAsync": await Assert.ThrowsAsync<TaskCanceledException>(async () => await client.SendAsync(request, cts.Token)); break; case "UnbufferedSendAsync": await Assert.ThrowsAsync<TaskCanceledException>(async () => await client.SendAsync(request, HttpCompletionOption.ResponseHeadersRead, cts.Token)); break; case "GetStringAsync": await Assert.ThrowsAsync<TaskCanceledException>(async () => await client.GetStringAsync(uri, cts.Token)); break; case "GetByteArrayAsync": await Assert.ThrowsAsync<TaskCanceledException>(async () => await client.GetByteArrayAsync(uri, cts.Token)); break; case "GetStreamAsync": await Assert.ThrowsAsync<TaskCanceledException>(async () => await client.GetStreamAsync(uri, cts.Token)); break; case "InvokerSend": await Assert.ThrowsAsync<TaskCanceledException>(async () => await Task.Run(() => invoker.Send(request, cts.Token))); break; case "InvokerSendAsync": await Assert.ThrowsAsync<TaskCanceledException>(async () => await invoker.SendAsync(request, cts.Token)); break; } semaphore.Release(); }, async server => { await server.AcceptConnectionAsync(async connection => { await connection.ReadRequestDataAsync(); await WaitForEventCountersAsync(events); cts.Cancel(); Assert.True(await semaphore.WaitAsync(TimeSpan.FromSeconds(30))); }); }); await WaitForEventCountersAsync(events); }); Assert.DoesNotContain(events, e => e.Event.EventId == 0); // errors from the EventSource itself ValidateStartFailedStopEvents(events, version, shouldHaveFailures: true); 
ValidateConnectionEstablishedClosed(events, version); ValidateEventCounters(events, requestCount: 1, shouldHaveFailures: true, versionMajor: version.Major); }, UseVersion.ToString(), QuicImplementationProvider, testMethod).Dispose(); } [OuterLoop] [ConditionalTheory(typeof(RemoteExecutor), nameof(RemoteExecutor.IsSupported))] [InlineData("PostAsync")] [InlineData("Send")] [InlineData("SendAsync")] [InlineData("SendChunkedAsync")] [InlineData("InvokerSend")] [InlineData("InvokerSendAsync")] public void EventSource_SendingRequestContent_LogsRequestContentStartStop(string testMethod) { if (UseVersion.Major != 1 && !testMethod.EndsWith("Async")) { // Synchronous requests are only supported for HTTP/1.1 return; } RemoteExecutor.Invoke(static async (useVersionString, quicProvider, testMethod) => { const int RequestContentLength = 42; const int ResponseContentLength = 43; Version version = Version.Parse(useVersionString); using var listener = new TestEventListener("System.Net.Http", EventLevel.Verbose, eventCounterInterval: 0.1d); listener.AddActivityTracking(); var events = new ConcurrentQueue<(EventWrittenEventArgs Event, Guid ActivityId)>(); await listener.RunWithCallbackAsync(e => events.Enqueue((e, e.ActivityId)), async () => { await GetFactoryForVersion(version, GetQuicImplementationProvider(quicProvider)).CreateClientAndServerAsync( async uri => { using HttpClientHandler handler = CreateHttpClientHandler(version, GetQuicImplementationProvider(quicProvider)); using HttpClient client = CreateHttpClient(handler, useVersionString); using var invoker = new HttpMessageInvoker(handler); var request = new HttpRequestMessage(HttpMethod.Get, uri) { Version = version }; var content = new ByteArrayContent(Encoding.ASCII.GetBytes(new string('a', RequestContentLength))); request.Content = content; switch (testMethod) { case "PostAsync": await client.PostAsync(uri, content); break; case "Send": await Task.Run(() => client.Send(request)); break; case "SendAsync": await client.SendAsync(request); break; case "SendChunkedAsync": request.Headers.TransferEncodingChunked = true; await client.SendAsync(request); break; case "InvokerSend": HttpResponseMessage syncResponse = await Task.Run(() => invoker.Send(request, cancellationToken: default)); await syncResponse.Content.CopyToAsync(Stream.Null); break; case "InvokerSendAsync": HttpResponseMessage asyncResponse = await invoker.SendAsync(request, cancellationToken: default); await asyncResponse.Content.CopyToAsync(Stream.Null); break; } }, async server => { await server.AcceptConnectionAsync(async connection => { await connection.ReadRequestDataAsync(); await WaitForEventCountersAsync(events); await connection.SendResponseAsync(content: new string('a', ResponseContentLength)); }); }); await WaitForEventCountersAsync(events); }); Assert.DoesNotContain(events, e => e.Event.EventId == 0); // errors from the EventSource itself ValidateStartFailedStopEvents(events, version); ValidateConnectionEstablishedClosed(events, version); ValidateRequestResponseStartStopEvents( events, RequestContentLength, responseContentLength: testMethod.StartsWith("InvokerSend") ? 
null : ResponseContentLength, count: 1); ValidateEventCounters(events, requestCount: 1, shouldHaveFailures: false, versionMajor: version.Major); }, UseVersion.ToString(), QuicImplementationProvider, testMethod).Dispose(); } private static void ValidateStartFailedStopEvents(ConcurrentQueue<(EventWrittenEventArgs Event, Guid ActivityId)> events, Version version, bool shouldHaveFailures = false, int count = 1) { (EventWrittenEventArgs Event, Guid ActivityId)[] starts = events.Where(e => e.Event.EventName == "RequestStart").ToArray(); foreach (EventWrittenEventArgs startEvent in starts.Select(e => e.Event)) { Assert.Equal(7, startEvent.Payload.Count); Assert.StartsWith("http", (string)startEvent.Payload[0]); Assert.NotEmpty((string)startEvent.Payload[1]); // host Assert.True(startEvent.Payload[2] is int port && port >= 0 && port <= 65535); Assert.NotEmpty((string)startEvent.Payload[3]); // pathAndQuery byte versionMajor = Assert.IsType<byte>(startEvent.Payload[4]); Assert.Equal(version.Major, versionMajor); byte versionMinor = Assert.IsType<byte>(startEvent.Payload[5]); Assert.Equal(version.Minor, versionMinor); Assert.InRange((HttpVersionPolicy)startEvent.Payload[6], HttpVersionPolicy.RequestVersionOrLower, HttpVersionPolicy.RequestVersionExact); } Assert.Equal(count, starts.Length); (EventWrittenEventArgs Event, Guid ActivityId)[] stops = events.Where(e => e.Event.EventName == "RequestStop").ToArray(); Assert.All(stops, stopEvent => Assert.Empty(stopEvent.Event.Payload)); ValidateSameActivityIds(starts, stops); (EventWrittenEventArgs Event, Guid ActivityId)[] failures = events.Where(e => e.Event.EventName == "RequestFailed").ToArray(); Assert.All(failures, failedEvent => Assert.Empty(failedEvent.Event.Payload)); if (shouldHaveFailures) { ValidateSameActivityIds(starts, failures); } else { Assert.Empty(failures); } } private static void ValidateConnectionEstablishedClosed(ConcurrentQueue<(EventWrittenEventArgs Event, Guid ActivityId)> events, Version version, int count = 1) { EventWrittenEventArgs[] connectionsEstablished = events.Select(e => e.Event).Where(e => e.EventName == "ConnectionEstablished").ToArray(); Assert.Equal(count, connectionsEstablished.Length); foreach (EventWrittenEventArgs connectionEstablished in connectionsEstablished) { Assert.Equal(2, connectionEstablished.Payload.Count); Assert.Equal(version.Major, (byte)connectionEstablished.Payload[0]); Assert.Equal(version.Minor, (byte)connectionEstablished.Payload[1]); } EventWrittenEventArgs[] connectionsClosed = events.Select(e => e.Event).Where(e => e.EventName == "ConnectionClosed").ToArray(); Assert.Equal(count, connectionsClosed.Length); foreach (EventWrittenEventArgs connectionClosed in connectionsClosed) { Assert.Equal(2, connectionClosed.Payload.Count); Assert.Equal(version.Major, (byte)connectionClosed.Payload[0]); Assert.Equal(version.Minor, (byte)connectionClosed.Payload[1]); } } private static void ValidateRequestResponseStartStopEvents(ConcurrentQueue<(EventWrittenEventArgs Event, Guid ActivityId)> events, int? requestContentLength, int? 
responseContentLength, int count) { (EventWrittenEventArgs Event, Guid ActivityId)[] requestHeadersStarts = events.Where(e => e.Event.EventName == "RequestHeadersStart").ToArray(); Assert.Equal(count, requestHeadersStarts.Length); Assert.All(requestHeadersStarts, r => Assert.Empty(r.Event.Payload)); (EventWrittenEventArgs Event, Guid ActivityId)[] requestHeadersStops = events.Where(e => e.Event.EventName == "RequestHeadersStop").ToArray(); Assert.Equal(count, requestHeadersStops.Length); Assert.All(requestHeadersStops, r => Assert.Empty(r.Event.Payload)); ValidateSameActivityIds(requestHeadersStarts, requestHeadersStops); (EventWrittenEventArgs Event, Guid ActivityId)[] requestContentStarts = events.Where(e => e.Event.EventName == "RequestContentStart").ToArray(); Assert.Equal(requestContentLength.HasValue ? count : 0, requestContentStarts.Length); Assert.All(requestContentStarts, r => Assert.Empty(r.Event.Payload)); (EventWrittenEventArgs Event, Guid ActivityId)[] requestContentStops = events.Where(e => e.Event.EventName == "RequestContentStop").ToArray(); Assert.Equal(requestContentLength.HasValue ? count : 0, requestContentStops.Length); foreach (EventWrittenEventArgs requestContentStop in requestContentStops.Select(e => e.Event)) { object payload = Assert.Single(requestContentStop.Payload); Assert.True(payload is long); Assert.Equal(requestContentLength.Value, (long)payload); } ValidateSameActivityIds(requestContentStarts, requestContentStops); (EventWrittenEventArgs Event, Guid ActivityId)[] responseHeadersStarts = events.Where(e => e.Event.EventName == "ResponseHeadersStart").ToArray(); Assert.Equal(count, responseHeadersStarts.Length); Assert.All(responseHeadersStarts, r => Assert.Empty(r.Event.Payload)); (EventWrittenEventArgs Event, Guid ActivityId)[] responseHeadersStops = events.Where(e => e.Event.EventName == "ResponseHeadersStop").ToArray(); Assert.Equal(count, responseHeadersStops.Length); Assert.All(responseHeadersStops, r => Assert.Empty(r.Event.Payload)); ValidateSameActivityIds(responseHeadersStarts, responseHeadersStops); (EventWrittenEventArgs Event, Guid ActivityId)[] responseContentStarts = events.Where(e => e.Event.EventName == "ResponseContentStart").ToArray(); Assert.Equal(responseContentLength.HasValue ? count : 0, responseContentStarts.Length); Assert.All(responseContentStarts, r => Assert.Empty(r.Event.Payload)); (EventWrittenEventArgs Event, Guid ActivityId)[] responseContentStops = events.Where(e => e.Event.EventName == "ResponseContentStop").ToArray(); Assert.Equal(responseContentLength.HasValue ? count : 0, responseContentStops.Length); Assert.All(responseContentStops, r => Assert.Empty(r.Event.Payload)); ValidateSameActivityIds(responseContentStarts, responseContentStops); } private static void ValidateSameActivityIds((EventWrittenEventArgs Event, Guid ActivityId)[] a, (EventWrittenEventArgs Event, Guid ActivityId)[] b) { Assert.Equal(a.Length, b.Length); for (int i = 0; i < a.Length; i++) { Assert.NotEqual(Guid.Empty, a[i].ActivityId); Assert.Equal(a[i].ActivityId, b[i].ActivityId); } } private static void ValidateEventCounters(ConcurrentQueue<(EventWrittenEventArgs Event, Guid ActivityId)> events, int requestCount, bool shouldHaveFailures, int versionMajor, bool requestLeftQueue = false) { Dictionary<string, double[]> eventCounters = events .Select(e => e.Event) .Where(e => e.EventName == "EventCounters") .Select(e => (IDictionary<string, object>)e.Payload.Single()) .GroupBy(d => (string)d["Name"], d => (double)(d.ContainsKey("Mean") ? 
d["Mean"] : d["Increment"])) .ToDictionary(p => p.Key, p => p.ToArray()); Assert.True(eventCounters.TryGetValue("requests-started", out double[] requestsStarted)); Assert.Equal(requestCount, requestsStarted[^1]); Assert.True(eventCounters.TryGetValue("requests-started-rate", out double[] requestRate)); Assert.Contains(requestRate, r => r > 0); Assert.True(eventCounters.TryGetValue("requests-failed", out double[] requestsFailures)); Assert.True(eventCounters.TryGetValue("requests-failed-rate", out double[] requestsFailureRate)); if (shouldHaveFailures) { Assert.Equal(1, requestsFailures[^1]); Assert.Contains(requestsFailureRate, r => r > 0); } else { Assert.All(requestsFailures, a => Assert.Equal(0, a)); Assert.All(requestsFailureRate, r => Assert.Equal(0, r)); } Assert.True(eventCounters.TryGetValue("current-requests", out double[] currentRequests)); Assert.Contains(currentRequests, c => c > 0); Assert.Equal(0, currentRequests[^1]); Assert.True(eventCounters.TryGetValue("http11-connections-current-total", out double[] http11ConnectionsTotal)); Assert.All(http11ConnectionsTotal, c => Assert.True(c >= 0)); Assert.Equal(0, http11ConnectionsTotal[^1]); Assert.True(eventCounters.TryGetValue("http20-connections-current-total", out double[] http20ConnectionsTotal)); Assert.All(http20ConnectionsTotal, c => Assert.True(c >= 0)); Assert.Equal(0, http20ConnectionsTotal[^1]); Assert.True(eventCounters.TryGetValue("http30-connections-current-total", out double[] http30ConnectionsTotal)); Assert.All(http30ConnectionsTotal, c => Assert.True(c >= 0)); Assert.Equal(0, http30ConnectionsTotal[^1]); if (versionMajor == 1) { Assert.Contains(http11ConnectionsTotal, d => d > 0); Assert.DoesNotContain(http20ConnectionsTotal, d => d > 0); Assert.DoesNotContain(http30ConnectionsTotal, d => d > 0); } else if (versionMajor == 2) { Assert.DoesNotContain(http11ConnectionsTotal, d => d > 0); Assert.Contains(http20ConnectionsTotal, d => d > 0); Assert.DoesNotContain(http30ConnectionsTotal, d => d > 0); } else { Assert.DoesNotContain(http11ConnectionsTotal, d => d > 0); Assert.DoesNotContain(http20ConnectionsTotal, d => d > 0); Assert.Contains(http30ConnectionsTotal, d => d > 0); } Assert.True(eventCounters.TryGetValue("http11-requests-queue-duration", out double[] http11requestQueueDurations)); Assert.All(http11requestQueueDurations, d => Assert.True(d >= 0)); Assert.Equal(0, http11requestQueueDurations[^1]); Assert.True(eventCounters.TryGetValue("http20-requests-queue-duration", out double[] http20requestQueueDurations)); Assert.All(http20requestQueueDurations, d => Assert.True(d >= 0)); Assert.Equal(0, http20requestQueueDurations[^1]); Assert.True(eventCounters.TryGetValue("http30-requests-queue-duration", out double[] http30requestQueueDurations)); Assert.All(http30requestQueueDurations, d => Assert.True(d >= 0)); Assert.Equal(0, http30requestQueueDurations[^1]); if (requestLeftQueue) { if (versionMajor == 1) { Assert.Contains(http11requestQueueDurations, d => d > 0); Assert.DoesNotContain(http20requestQueueDurations, d => d > 0); Assert.DoesNotContain(http30requestQueueDurations, d => d > 0); } else if (versionMajor == 2) { Assert.DoesNotContain(http11requestQueueDurations, d => d > 0); Assert.Contains(http20requestQueueDurations, d => d > 0); Assert.DoesNotContain(http30requestQueueDurations, d => d > 0); } else { Assert.DoesNotContain(http11requestQueueDurations, d => d > 0); Assert.DoesNotContain(http20requestQueueDurations, d => d > 0); Assert.Contains(http30requestQueueDurations, d => d > 0); } } } [OuterLoop] 
[ConditionalFact(typeof(RemoteExecutor), nameof(RemoteExecutor.IsSupported))] public void EventSource_ConnectionPoolAtMaxConnections_LogsRequestLeftQueue() { if (UseVersion.Major == 3 && UseQuicImplementationProvider == QuicImplementationProviders.Mock) { return; } RemoteExecutor.Invoke(static async (useVersionString, quicProvider) => { Version version = Version.Parse(useVersionString); using var listener = new TestEventListener("System.Net.Http", EventLevel.Verbose, eventCounterInterval: 0.1d); listener.AddActivityTracking(); var events = new ConcurrentQueue<(EventWrittenEventArgs Event, Guid ActivityId)>(); await listener.RunWithCallbackAsync(e => events.Enqueue((e, e.ActivityId)), async () => { var firstRequestReceived = new SemaphoreSlim(0, 1); var secondRequestSent = new SemaphoreSlim(0, 1); var firstRequestFinished = new SemaphoreSlim(0, 1); await GetFactoryForVersion(version, GetQuicImplementationProvider(quicProvider)).CreateClientAndServerAsync( async uri => { using HttpClientHandler handler = CreateHttpClientHandler(version, GetQuicImplementationProvider(quicProvider)); using HttpClient client = CreateHttpClient(handler, useVersionString); var socketsHttpHandler = GetUnderlyingSocketsHttpHandler(handler); socketsHttpHandler.MaxConnectionsPerServer = 1; socketsHttpHandler.SslOptions.RemoteCertificateValidationCallback = delegate { return true; }; // Dummy request to ensure that the MaxConcurrentStreams setting has been acknowledged await client.GetStringAsync(uri); Task firstRequest = client.GetStringAsync(uri); Assert.True(await firstRequestReceived.WaitAsync(TimeSpan.FromSeconds(10))); // We are now at the connection limit, the next request will wait for the first one to complete Task secondRequest = client.GetStringAsync(uri); secondRequestSent.Release(); // We are asserting that ActivityIds between Start/Stop pairs match below // We wait for the first request to finish to ensure that RequestStop events // are logged in the same order as RequestStarts await firstRequest; firstRequestFinished.Release(); await secondRequest; }, async server => { GenericLoopbackConnection connection; if (server is Http2LoopbackServer http2Server) { connection = await http2Server.EstablishConnectionAsync(new SettingsEntry { SettingId = SettingId.MaxConcurrentStreams, Value = 1 }); } else { connection = await server.EstablishGenericConnectionAsync(); } using (connection) { // Dummy request to ensure that the MaxConcurrentStreams setting has been acknowledged await connection.ReadRequestDataAsync(readBody: false); await connection.SendResponseAsync(); // First request await connection.ReadRequestDataAsync(readBody: false); firstRequestReceived.Release(); Assert.True(await secondRequestSent.WaitAsync(TimeSpan.FromSeconds(10))); await WaitForEventCountersAsync(events); await connection.SendResponseAsync(); // Second request Assert.True(await firstRequestFinished.WaitAsync(TimeSpan.FromSeconds(10))); await connection.ReadRequestDataAsync(readBody: false); await connection.SendResponseAsync(); }; }, options: new Http3Options { MaxBidirectionalStreams = 1 }); await WaitForEventCountersAsync(events); }); Assert.DoesNotContain(events, e => e.Event.EventId == 0); // errors from the EventSource itself ValidateStartFailedStopEvents(events, version, count: 3); ValidateConnectionEstablishedClosed(events, version); var requestLeftQueueEvents = events.Where(e => e.Event.EventName == "RequestLeftQueue"); Assert.InRange(requestLeftQueueEvents.Count(), 2, version.Major == 3 ? 
3 : 2); foreach (var (e, _) in requestLeftQueueEvents) { Assert.Equal(3, e.Payload.Count); Assert.True((double)e.Payload[0] > 0); // timeSpentOnQueue Assert.Equal(version.Major, (byte)e.Payload[1]); Assert.Equal(version.Minor, (byte)e.Payload[2]); } Guid requestLeftQueueId = requestLeftQueueEvents.Last().ActivityId; Assert.Equal(requestLeftQueueId, events.Where(e => e.Event.EventName == "RequestStart").Last().ActivityId); ValidateRequestResponseStartStopEvents(events, requestContentLength: null, responseContentLength: 0, count: 3); ValidateEventCounters(events, requestCount: 3, shouldHaveFailures: false, versionMajor: version.Major, requestLeftQueue: true); }, UseVersion.ToString(), QuicImplementationProvider).Dispose(); } private static async Task WaitForEventCountersAsync(ConcurrentQueue<(EventWrittenEventArgs Event, Guid ActivityId)> events) { DateTime startTime = DateTime.UtcNow; int startCount = events.Count; while (events.Skip(startCount).Count(e => IsRequestsStartedEventCounter(e.Event)) < 3) { if (DateTime.UtcNow.Subtract(startTime) > TimeSpan.FromSeconds(30)) throw new TimeoutException($"Timed out waiting for EventCounters"); await Task.Delay(100); } static bool IsRequestsStartedEventCounter(EventWrittenEventArgs e) { if (e.EventName != "EventCounters") return false; var dictionary = (IDictionary<string, object>)e.Payload.Single(); return (string)dictionary["Name"] == "requests-started"; } } } public sealed class TelemetryTest_Http11 : TelemetryTest { public TelemetryTest_Http11(ITestOutputHelper output) : base(output) { } } public sealed class TelemetryTest_Http20 : TelemetryTest { protected override Version UseVersion => HttpVersion.Version20; public TelemetryTest_Http20(ITestOutputHelper output) : base(output) { } } [ConditionalClass(typeof(HttpClientHandlerTestBase), nameof(IsMsQuicSupported))] public sealed class TelemetryTest_Http30_MsQuic : TelemetryTest { protected override Version UseVersion => HttpVersion.Version30; protected override QuicImplementationProvider UseQuicImplementationProvider => QuicImplementationProviders.MsQuic; public TelemetryTest_Http30_MsQuic(ITestOutputHelper output) : base(output) { } } [ConditionalClass(typeof(HttpClientHandlerTestBase), nameof(IsMockQuicSupported))] public sealed class TelemetryTest_Http30_Mock : TelemetryTest { protected override Version UseVersion => HttpVersion.Version30; protected override QuicImplementationProvider UseQuicImplementationProvider => QuicImplementationProviders.Mock; public TelemetryTest_Http30_Mock(ITestOutputHelper output) : base(output) { } } }
-1
dotnet/runtime
66,477
Add armv8l virtualized 32-bit ARM core detection
## Expected behavior

The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores.

## Actual behavior

Per dotnet/source-build#2781, the runtime does not know to parse the architecture as `arm` when the output of `uname -m` is `armv8l`.

## Proposed modifications

This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l` (a rough sketch of the mapping appears below).

## Varia

- Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812)
- Arm support on source-build at https://github.com/dotnet/installer/pull/13378
- Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782
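To make the described mapping concrete, here is a minimal, hypothetical C# sketch of the idea. It is an assumption for illustration only: the actual change lives in the runtime's native build scripts rather than in managed code, and the method name `MapUnameMachineToArch` and the returned architecture strings are invented here, not taken from the repository.

```c#
// Hypothetical illustration only; not the dotnet/runtime build logic itself.
// Maps the output of `uname -m` to an architecture name, treating armv8l
// (a 32-bit userspace on a 64-bit ARM core) exactly like armv7l.
using System;

static string MapUnameMachineToArch(string unameMachine) => unameMachine switch
{
    "x86_64" or "amd64"  => "x64",
    "aarch64" or "arm64" => "arm64",
    "armv7l" or "armv8l" => "arm",   // armv8l is handled the same way as armv7l
    _ => unameMachine
};

Console.WriteLine(MapUnameMachineToArch("armv8l")); // prints "arm"
```

In the actual repository the equivalent check most likely sits in the native build's architecture-detection scripts, where `armv8l` would simply be added alongside the existing `armv7l` case.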
ayakael
2022-03-10T21:05:11Z
2022-03-11T09:29:12Z
f82fe1d83f1a0e22025e186fdc6f4d86de0a83a1
12816261feee53cc57fab17f088ee6544ef6ffdb
Add armv8l virtualized 32-bit ARM core detection.

## Expected behavior

The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores.

## Actual behavior

Per dotnet/source-build#2781, the runtime does not know to parse the architecture as `arm` when the output of `uname -m` is `armv8l`.

## Proposed modifications

This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`.

## Varia

- Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812)
- Arm support on source-build at https://github.com/dotnet/installer/pull/13378
- Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782
./src/libraries/System.Speech/src/Synthesis/PromptBuilder.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Collections.ObjectModel; using System.ComponentModel; using System.Globalization; using System.IO; using System.Speech.Internal; using System.Speech.Internal.Synthesis; using System.Xml; namespace System.Speech.Synthesis { [Serializable] public class PromptBuilder { #region Constructors public PromptBuilder() : this(CultureInfo.CurrentUICulture) { } public PromptBuilder(CultureInfo culture) { Helpers.ThrowIfNull(culture, nameof(culture)); if (culture.Equals(CultureInfo.InvariantCulture)) { throw new ArgumentException(SR.Get(SRID.InvariantCultureInfo), nameof(culture)); } _culture = culture; // Reset all value to default ClearContent(); } #endregion #region Public Methods // Use Append* naming convention. /// <summary> /// Clear the content of the prompt builder /// </summary> public void ClearContent() { _elements.Clear(); _elementStack.Push(new StackElement(SsmlElement.Lexicon | SsmlElement.Meta | SsmlElement.MetaData | SsmlElement.ParagraphOrSentence | SsmlElement.AudioMarkTextWithStyle, SsmlState.Header, _culture)); } /// <summary> /// Append Text to the SSML stream /// </summary> public void AppendText(string textToSpeak) { Helpers.ThrowIfNull(textToSpeak, nameof(textToSpeak)); // Validate that text can be added in this context ValidateElement(_elementStack.Peek(), SsmlElement.Text); _elements.Add(new Element(ElementType.Text, textToSpeak)); } public void AppendText(string textToSpeak, PromptRate rate) { Helpers.ThrowIfNull(textToSpeak, nameof(textToSpeak)); if (rate < PromptRate.NotSet || rate > PromptRate.ExtraSlow) { throw new ArgumentOutOfRangeException(nameof(rate)); } // Validate that text can be added in this context ValidateElement(_elementStack.Peek(), SsmlElement.Text); Element prosodyElement = new(ElementType.Prosody, textToSpeak); _elements.Add(prosodyElement); string sPromptRate = null; switch (rate) { case PromptRate.NotSet: break; case PromptRate.ExtraFast: sPromptRate = "x-fast"; break; case PromptRate.ExtraSlow: sPromptRate = "x-slow"; break; default: sPromptRate = rate.ToString().ToLowerInvariant(); break; } if (!string.IsNullOrEmpty(sPromptRate)) { prosodyElement._attributes = new Collection<AttributeItem>(); prosodyElement._attributes.Add(new AttributeItem("rate", sPromptRate)); } } public void AppendText(string textToSpeak, PromptVolume volume) { Helpers.ThrowIfNull(textToSpeak, nameof(textToSpeak)); if (volume < PromptVolume.NotSet || volume > PromptVolume.Default) { throw new ArgumentOutOfRangeException(nameof(volume)); } // Validate that text can be added in this context ValidateElement(_elementStack.Peek(), SsmlElement.Text); Element prosodyElement = new(ElementType.Prosody, textToSpeak); _elements.Add(prosodyElement); string sVolumeLevel = null; switch (volume) { // No volume do not set the attribute case PromptVolume.NotSet: break; case PromptVolume.ExtraSoft: sVolumeLevel = "x-soft"; break; case PromptVolume.ExtraLoud: sVolumeLevel = "x-loud"; break; default: sVolumeLevel = volume.ToString().ToLowerInvariant(); break; } if (!string.IsNullOrEmpty(sVolumeLevel)) { prosodyElement._attributes = new Collection<AttributeItem>(); prosodyElement._attributes.Add(new AttributeItem("volume", sVolumeLevel)); } } public void AppendText(string textToSpeak, PromptEmphasis emphasis) { Helpers.ThrowIfNull(textToSpeak, nameof(textToSpeak)); if (emphasis < PromptEmphasis.NotSet || emphasis > 
PromptEmphasis.Reduced) { throw new ArgumentOutOfRangeException(nameof(emphasis)); } // Validate that text can be added in this context ValidateElement(_elementStack.Peek(), SsmlElement.Text); Element emphasisElement = new(ElementType.Emphasis, textToSpeak); _elements.Add(emphasisElement); if (emphasis != PromptEmphasis.NotSet) { emphasisElement._attributes = new Collection<AttributeItem>(); emphasisElement._attributes.Add(new AttributeItem("level", emphasis.ToString().ToLowerInvariant())); } } public void StartStyle(PromptStyle style) { Helpers.ThrowIfNull(style, nameof(style)); // Validate that text can be added in this context StackElement stackElement = _elementStack.Peek(); ValidateElement(stackElement, SsmlElement.Prosody); // For emphasis or Prosody the list of possible elements that can be children is different. SsmlState ssmlState = 0; SsmlElement possibleChildren = stackElement._possibleChildren; _elements.Add(new Element(ElementType.StartStyle)); if (style.Emphasis != PromptEmphasis.NotSet) { Element emphasisElement = new(ElementType.Emphasis); _elements.Add(emphasisElement); emphasisElement._attributes = new Collection<AttributeItem>(); emphasisElement._attributes.Add(new AttributeItem("level", style.Emphasis.ToString().ToLowerInvariant())); // Set the expected children and mark the element used possibleChildren = SsmlElement.AudioMarkTextWithStyle; ssmlState = SsmlState.StyleEmphasis; } if (style.Rate != PromptRate.NotSet || style.Volume != PromptVolume.NotSet) { // two elements add a second start style if (ssmlState != 0) { _elements.Add(new Element(ElementType.StartStyle)); } Element prosodyElement = new(ElementType.Prosody); _elements.Add(prosodyElement); if (style.Rate != PromptRate.NotSet) { string sPromptRate; switch (style.Rate) { case PromptRate.ExtraFast: sPromptRate = "x-fast"; break; case PromptRate.ExtraSlow: sPromptRate = "x-slow"; break; default: sPromptRate = style.Rate.ToString().ToLowerInvariant(); break; } prosodyElement._attributes = new Collection<AttributeItem>(); prosodyElement._attributes.Add(new AttributeItem("rate", sPromptRate)); } if (style.Volume != PromptVolume.NotSet) { string sVolumeLevel; switch (style.Volume) { case PromptVolume.ExtraSoft: sVolumeLevel = "x-soft"; break; case PromptVolume.ExtraLoud: sVolumeLevel = "x-loud"; break; default: sVolumeLevel = style.Volume.ToString().ToLowerInvariant(); break; } if (prosodyElement._attributes == null) { prosodyElement._attributes = new Collection<AttributeItem>(); } prosodyElement._attributes.Add(new AttributeItem("volume", sVolumeLevel)); } // Set the expected children and mark the element used possibleChildren = SsmlElement.ParagraphOrSentence | SsmlElement.AudioMarkTextWithStyle; ssmlState |= SsmlState.StyleProsody; } _elementStack.Push(new StackElement(possibleChildren, ssmlState, stackElement._culture)); } public void EndStyle() { StackElement stackElement = _elementStack.Pop(); if (stackElement._state != 0) { if ((stackElement._state & (SsmlState.StyleEmphasis | SsmlState.StyleProsody)) == 0) { throw new InvalidOperationException(SR.Get(SRID.PromptBuilderMismatchStyle)); } _elements.Add(new Element(ElementType.EndStyle)); // Check if 2 xml elements have been created if (stackElement._state == (SsmlState.StyleEmphasis | SsmlState.StyleProsody)) { _elements.Add(new Element(ElementType.EndStyle)); } } } public void StartVoice(VoiceInfo voice) { Helpers.ThrowIfNull(voice, nameof(voice)); if (!VoiceInfo.ValidateGender(voice.Gender)) { throw new ArgumentException(SR.Get(SRID.EnumInvalid, 
"VoiceGender"), nameof(voice)); } if (!VoiceInfo.ValidateAge(voice.Age)) { throw new ArgumentException(SR.Get(SRID.EnumInvalid, "VoiceAge"), nameof(voice)); } StackElement stackElement = _elementStack.Peek(); ValidateElement(stackElement, SsmlElement.Voice); CultureInfo culture = voice.Culture == null ? stackElement._culture : voice.Culture; Element startVoice = new(ElementType.StartVoice); startVoice._attributes = new Collection<AttributeItem>(); _elements.Add(startVoice); if (!string.IsNullOrEmpty(voice.Name)) { startVoice._attributes.Add(new AttributeItem("name", voice.Name)); } if (voice.Culture != null) { startVoice._attributes.Add(new AttributeItem("xml", "lang", voice.Culture.Name)); } if (voice.Gender != VoiceGender.NotSet) { startVoice._attributes.Add(new AttributeItem("gender", voice.Gender.ToString().ToLowerInvariant())); } if (voice.Age != VoiceAge.NotSet) { startVoice._attributes.Add(new AttributeItem("age", ((int)voice.Age).ToString(CultureInfo.InvariantCulture))); } if (voice.Variant >= 0) { startVoice._attributes.Add(new AttributeItem("variant", voice.Variant.ToString(CultureInfo.InvariantCulture))); } _elementStack.Push(new StackElement(SsmlElement.Sentence | SsmlElement.AudioMarkTextWithStyle, SsmlState.Voice, culture)); } public void StartVoice(string name) { Helpers.ThrowIfEmptyOrNull(name, nameof(name)); StartVoice(new VoiceInfo(name)); } public void StartVoice(VoiceGender gender) { StartVoice(new VoiceInfo(gender)); } public void StartVoice(VoiceGender gender, VoiceAge age) { StartVoice(new VoiceInfo(gender, age)); } public void StartVoice(VoiceGender gender, VoiceAge age, int voiceAlternate) { StartVoice(new VoiceInfo(gender, age, voiceAlternate)); } public void StartVoice(CultureInfo culture) { StartVoice(new VoiceInfo(culture)); } public void EndVoice() { if (_elementStack.Pop()._state != SsmlState.Voice) { throw new InvalidOperationException(SR.Get(SRID.PromptBuilderMismatchVoice)); } _elements.Add(new Element(ElementType.EndVoice)); } // <paragraph>, <sentence> public void StartParagraph() { StartParagraph(null); } public void StartParagraph(CultureInfo culture) { // check for well formed document StackElement stackElement = _elementStack.Peek(); ValidateElement(stackElement, SsmlElement.Paragraph); Element startParagraph = new(ElementType.StartParagraph); _elements.Add(startParagraph); if (culture != null) { if (culture.Equals(CultureInfo.InvariantCulture)) { throw new ArgumentException(SR.Get(SRID.InvariantCultureInfo), nameof(culture)); } startParagraph._attributes = new Collection<AttributeItem>(); startParagraph._attributes.Add(new AttributeItem("xml", "lang", culture.Name)); } else { culture = stackElement._culture; } _elementStack.Push(new StackElement(SsmlElement.AudioMarkTextWithStyle | SsmlElement.Sentence, SsmlState.Paragraph, culture)); } public void EndParagraph() { if (_elementStack.Pop()._state != SsmlState.Paragraph) { throw new InvalidOperationException(SR.Get(SRID.PromptBuilderMismatchParagraph)); } _elements.Add(new Element(ElementType.EndParagraph)); } public void StartSentence() { StartSentence(null); } public void StartSentence(CultureInfo culture) { // check for well formed document StackElement stackElement = _elementStack.Peek(); ValidateElement(stackElement, SsmlElement.Sentence); Element startSentence = new(ElementType.StartSentence); _elements.Add(startSentence); if (culture != null) { if (culture.Equals(CultureInfo.InvariantCulture)) { throw new ArgumentException(SR.Get(SRID.InvariantCultureInfo), nameof(culture)); } 
startSentence._attributes = new Collection<AttributeItem>(); startSentence._attributes.Add(new AttributeItem("xml", "lang", culture.Name)); } else { culture = stackElement._culture; } _elementStack.Push(new StackElement(SsmlElement.AudioMarkTextWithStyle, SsmlState.Sentence, culture)); } public void EndSentence() { if (_elementStack.Pop()._state != SsmlState.Sentence) { throw new InvalidOperationException(SR.Get(SRID.PromptBuilderMismatchSentence)); } _elements.Add(new Element(ElementType.EndSentence)); } public void AppendTextWithHint(string textToSpeak, SayAs sayAs) { Helpers.ThrowIfNull(textToSpeak, nameof(textToSpeak)); if (sayAs < SayAs.SpellOut || sayAs > SayAs.Text) { throw new ArgumentOutOfRangeException(nameof(sayAs)); } // check for well formed document ValidateElement(_elementStack.Peek(), SsmlElement.Text); if (sayAs != SayAs.Text) { Element sayAsElement = new(ElementType.SayAs, textToSpeak); _elements.Add(sayAsElement); sayAsElement._attributes = new Collection<AttributeItem>(); string sInterpretAs = null; string sFormat = null; switch (sayAs) { case SayAs.SpellOut: sInterpretAs = "characters"; break; case SayAs.NumberOrdinal: sInterpretAs = "ordinal"; break; case SayAs.NumberCardinal: sInterpretAs = "cardinal"; break; case SayAs.Date: sInterpretAs = "date"; break; case SayAs.DayMonthYear: sInterpretAs = "date"; sFormat = "dmy"; break; case SayAs.MonthDayYear: sInterpretAs = "date"; sFormat = "mdy"; break; case SayAs.YearMonthDay: sInterpretAs = "date"; sFormat = "ymd"; break; case SayAs.YearMonth: sInterpretAs = "date"; sFormat = "ym"; break; case SayAs.MonthYear: sInterpretAs = "date"; sFormat = "my"; break; case SayAs.MonthDay: sInterpretAs = "date"; sFormat = "md"; break; case SayAs.DayMonth: sInterpretAs = "date"; sFormat = "dm"; break; case SayAs.Year: sInterpretAs = "date"; sFormat = "y"; break; case SayAs.Month: sInterpretAs = "date"; sFormat = "m"; break; case SayAs.Day: sInterpretAs = "date"; sFormat = "d"; break; case SayAs.Time: sInterpretAs = "time"; break; case SayAs.Time24: sInterpretAs = "time"; sFormat = "hms24"; break; case SayAs.Time12: sInterpretAs = "time"; sFormat = "hms12"; break; case SayAs.Telephone: sInterpretAs = "telephone"; break; } sayAsElement._attributes.Add(new AttributeItem("interpret-as", sInterpretAs)); if (!string.IsNullOrEmpty(sFormat)) { sayAsElement._attributes.Add(new AttributeItem("format", sFormat)); } } else { AppendText(textToSpeak); } } public void AppendTextWithHint(string textToSpeak, string sayAs) { Helpers.ThrowIfNull(textToSpeak, nameof(textToSpeak)); Helpers.ThrowIfEmptyOrNull(sayAs, nameof(sayAs)); // check for well formed document ValidateElement(_elementStack.Peek(), SsmlElement.Text); Element sayAsElement = new(ElementType.SayAs, textToSpeak); _elements.Add(sayAsElement); sayAsElement._attributes = new Collection<AttributeItem>(); sayAsElement._attributes.Add(new AttributeItem("interpret-as", sayAs)); } public void AppendTextWithPronunciation(string textToSpeak, string pronunciation) { Helpers.ThrowIfEmptyOrNull(textToSpeak, nameof(textToSpeak)); Helpers.ThrowIfEmptyOrNull(pronunciation, nameof(pronunciation)); // check for well formed document ValidateElement(_elementStack.Peek(), SsmlElement.Text); // validate the pronunciation PhonemeConverter.ValidateUpsIds(pronunciation); Element phoneElement = new(ElementType.Phoneme, textToSpeak); _elements.Add(phoneElement); phoneElement._attributes = new Collection<AttributeItem>(); phoneElement._attributes.Add(new AttributeItem("ph", pronunciation)); } public void 
AppendTextWithAlias(string textToSpeak, string substitute) { Helpers.ThrowIfNull(textToSpeak, nameof(textToSpeak)); Helpers.ThrowIfNull(substitute, nameof(substitute)); // check for well formed document ValidateElement(_elementStack.Peek(), SsmlElement.Text); Element subElement = new(ElementType.Sub, textToSpeak); _elements.Add(subElement); subElement._attributes = new Collection<AttributeItem>(); subElement._attributes.Add(new AttributeItem("alias", substitute)); } public void AppendBreak() { // check for well formed document ValidateElement(_elementStack.Peek(), SsmlElement.Break); _elements.Add(new Element(ElementType.Break)); } public void AppendBreak(PromptBreak strength) { // check for well formed document ValidateElement(_elementStack.Peek(), SsmlElement.Break); Element breakElement = new(ElementType.Break); _elements.Add(breakElement); string sBreak = null; switch (strength) { case PromptBreak.None: sBreak = "none"; break; case PromptBreak.ExtraSmall: sBreak = "x-weak"; break; case PromptBreak.Small: sBreak = "weak"; break; case PromptBreak.Medium: sBreak = "medium"; break; case PromptBreak.Large: sBreak = "strong"; break; case PromptBreak.ExtraLarge: sBreak = "x-strong"; break; default: throw new ArgumentNullException(nameof(strength)); } breakElement._attributes = new Collection<AttributeItem>(); breakElement._attributes.Add(new AttributeItem("strength", sBreak)); } public void AppendBreak(TimeSpan duration) { // check for well formed document ValidateElement(_elementStack.Peek(), SsmlElement.Break); if (duration.Ticks < 0) { throw new ArgumentOutOfRangeException(nameof(duration)); } Element breakElement = new(ElementType.Break); _elements.Add(breakElement); breakElement._attributes = new Collection<AttributeItem>(); breakElement._attributes.Add(new AttributeItem("time", duration.TotalMilliseconds + "ms")); } // <audio> public void AppendAudio(string path) { Helpers.ThrowIfEmptyOrNull(path, nameof(path)); Uri uri; try { uri = new Uri(path, UriKind.RelativeOrAbsolute); } catch (UriFormatException e) { throw new ArgumentException(e.Message, path, e); } // check for well formed document ValidateElement(_elementStack.Peek(), SsmlElement.Audio); AppendAudio(uri); } public void AppendAudio(Uri audioFile) { Helpers.ThrowIfNull(audioFile, nameof(audioFile)); // check for well formed document ValidateElement(_elementStack.Peek(), SsmlElement.Audio); Element audioElement = new(ElementType.Audio); _elements.Add(audioElement); audioElement._attributes = new Collection<AttributeItem>(); audioElement._attributes.Add(new AttributeItem("src", audioFile.ToString())); } public void AppendAudio(Uri audioFile, string alternateText) { Helpers.ThrowIfNull(audioFile, nameof(audioFile)); Helpers.ThrowIfNull(alternateText, nameof(alternateText)); // check for well formed document ValidateElement(_elementStack.Peek(), SsmlElement.Audio); Element audioElement = new(ElementType.Audio, alternateText); _elements.Add(audioElement); audioElement._attributes = new Collection<AttributeItem>(); audioElement._attributes.Add(new AttributeItem("src", audioFile.ToString())); } // <mark> public void AppendBookmark(string bookmarkName) { Helpers.ThrowIfEmptyOrNull(bookmarkName, nameof(bookmarkName)); // check for well formed document ValidateElement(_elementStack.Peek(), SsmlElement.Mark); Element bookmarkElement = new(ElementType.Bookmark); _elements.Add(bookmarkElement); bookmarkElement._attributes = new Collection<AttributeItem>(); bookmarkElement._attributes.Add(new AttributeItem("name", bookmarkName)); } public 
void AppendPromptBuilder(PromptBuilder promptBuilder) { Helpers.ThrowIfNull(promptBuilder, nameof(promptBuilder)); StringReader sr = new(promptBuilder.ToXml()); XmlTextReader reader = new(sr); AppendSsml(reader); reader.Close(); sr.Close(); } public void AppendSsml(string path) { Helpers.ThrowIfEmptyOrNull(path, nameof(path)); AppendSsml(new Uri(path, UriKind.Relative)); } public void AppendSsml(Uri ssmlFile) { Helpers.ThrowIfNull(ssmlFile, nameof(ssmlFile)); string localFile; Uri redirectUri; using (Stream stream = s_resourceLoader.LoadFile(ssmlFile, out localFile, out redirectUri)) { try { AppendSsml(new XmlTextReader(stream)); } finally { s_resourceLoader.UnloadFile(localFile); } } } public void AppendSsml(XmlReader ssmlFile) { Helpers.ThrowIfNull(ssmlFile, nameof(ssmlFile)); AppendSsmlInternal(ssmlFile); } // Advanced: Extensibility model to write through to the underlying stream writer. [EditorBrowsable(EditorBrowsableState.Never)] public void AppendSsmlMarkup(string ssmlMarkup) { Helpers.ThrowIfEmptyOrNull(ssmlMarkup, nameof(ssmlMarkup)); _elements.Add(new Element(ElementType.SsmlMarkup, ssmlMarkup)); } public string ToXml() { using (StringWriter sw = new(CultureInfo.InvariantCulture)) { using (XmlTextWriter writer = new(sw)) { WriteXml(writer); SsmlState state = _elementStack.Peek()._state; if (state != SsmlState.Header) { string sMsg = SR.Get(SRID.PromptBuilderInvalideState); switch (state) { case SsmlState.Ended: sMsg += SR.Get(SRID.PromptBuilderStateEnded); break; case SsmlState.Sentence: sMsg += SR.Get(SRID.PromptBuilderStateSentence); break; case SsmlState.Paragraph: sMsg += SR.Get(SRID.PromptBuilderStateParagraph); break; case SsmlState.StyleEmphasis: case SsmlState.StyleProsody: case (SsmlState.StyleProsody | SsmlState.StyleEmphasis): sMsg += SR.Get(SRID.PromptBuilderStateStyle); break; case SsmlState.Voice: sMsg += SR.Get(SRID.PromptBuilderStateVoice); break; default: System.Diagnostics.Debug.Assert(false); throw new NotSupportedException(); } throw new InvalidOperationException(sMsg); } return sw.ToString(); } } } #endregion #region public Properties public bool IsEmpty { get { return _elements.Count == 0; } } public CultureInfo Culture { get { return _culture; } set { if (value == null) { throw new ArgumentNullException(nameof(value)); } _culture = value; } } #endregion #region Internal Enums internal enum SsmlState { Header = 1, Paragraph = 2, Sentence = 4, StyleEmphasis = 8, StyleProsody = 16, Voice = 32, Ended = 64 } #endregion #region Protected Methods #endregion #region Private Methods private void WriteXml(XmlTextWriter writer) { writer.WriteStartElement("speak"); // Add the required elements. 
writer.WriteAttributeString("version", "1.0"); writer.WriteAttributeString("xmlns", _xmlnsDefault); writer.WriteAttributeString("xml", "lang", null, _culture.Name); bool noEndElement = false; foreach (Element element in _elements) { noEndElement = noEndElement || element._type == ElementType.StartSentence || element._type == ElementType.StartParagraph || element._type == ElementType.StartStyle || element._type == ElementType.StartVoice; switch (element._type) { case ElementType.Text: writer.WriteString(element._text); break; case ElementType.SsmlMarkup: writer.WriteRaw(element._text); break; case ElementType.StartVoice: case ElementType.StartParagraph: case ElementType.StartSentence: case ElementType.Audio: case ElementType.Break: case ElementType.Bookmark: case ElementType.Emphasis: case ElementType.Phoneme: case ElementType.Prosody: case ElementType.SayAs: case ElementType.Sub: writer.WriteStartElement(s_promptBuilderElementName[(int)element._type]); // Write the attributes if any if (element._attributes != null) { foreach (AttributeItem attribute in element._attributes) { if (attribute._namespace == null) { writer.WriteAttributeString(attribute._key, attribute._value); } else { writer.WriteAttributeString(attribute._namespace, attribute._key, null, attribute._value); } } } // Write the text if any if (element._text != null) { writer.WriteString(element._text); } // Close the element unless it should wait if (!noEndElement) { writer.WriteEndElement(); } noEndElement = false; break; // Ignore just set the bool to not close the element case ElementType.StartStyle: break; // Close the current element case ElementType.EndStyle: case ElementType.EndVoice: case ElementType.EndParagraph: case ElementType.EndSentence: writer.WriteEndElement(); break; default: throw new NotSupportedException(); } } writer.WriteEndElement(); } /// <summary> /// Ensure the this element is properly placed in the SSML markup /// </summary> private static void ValidateElement(StackElement stackElement, SsmlElement currentElement) { if ((stackElement._possibleChildren & currentElement) == 0) { throw new InvalidOperationException(string.Format(CultureInfo.InvariantCulture, SR.Get(SRID.PromptBuilderInvalidElement), currentElement.ToString(), stackElement._state.ToString())); } } private void AppendSsmlInternal(XmlReader ssmlFile) { // check for well formed document StackElement stackElement = _elementStack.Peek(); ValidateElement(_elementStack.Peek(), SsmlElement.Voice); using (StringWriter sw = new(CultureInfo.InvariantCulture)) { using (XmlTextWriter writer = new(sw)) { TextWriterEngine engine = new(writer, stackElement._culture); SsmlParser.Parse(ssmlFile, engine, null); } _elements.Add(new Element(ElementType.SsmlMarkup, sw.ToString())); } } #endregion #region Private Fields // Stack of elements for the SSML document private Stack<StackElement> _elementStack = new(); // <xml:lang> private CultureInfo _culture; // list of all the elements for this prompt builder private List<Element> _elements = new(); // Resource loader for the prompt builder private static ResourceLoader s_resourceLoader = new(); private const string _xmlnsDefault = @"http://www.w3.org/2001/10/synthesis"; #endregion #region Private Type [Serializable] private struct StackElement { internal SsmlElement _possibleChildren; internal SsmlState _state; internal CultureInfo _culture; internal StackElement(SsmlElement possibleChildren, SsmlState state, CultureInfo culture) { _possibleChildren = possibleChildren; _state = state; _culture = culture; } } 
private enum ElementType { Prosody, Emphasis, SayAs, Phoneme, Sub, Break, Audio, Bookmark, StartVoice, StartParagraph, StartSentence, EndSentence, EndParagraph, StartStyle, EndStyle, EndVoice, Text, SsmlMarkup } private static readonly string[] s_promptBuilderElementName = new string[] { "prosody", "emphasis", "say-as", "phoneme", "sub", "break", "audio", "mark", "voice", "p", "s" }; [Serializable] private struct AttributeItem { internal string _key; internal string _value; internal string _namespace; internal AttributeItem(string key, string value) { _key = key; _value = value; _namespace = null; } internal AttributeItem(string ns, string key, string value) : this(key, value) { _namespace = ns; } } [Serializable] private sealed class Element { internal ElementType _type; internal string _text; internal Collection<AttributeItem> _attributes; internal Element(ElementType type) { _type = type; } internal Element(ElementType type, string text) : this(type) { _text = text; } } #endregion } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Collections.ObjectModel; using System.ComponentModel; using System.Globalization; using System.IO; using System.Speech.Internal; using System.Speech.Internal.Synthesis; using System.Xml; namespace System.Speech.Synthesis { [Serializable] public class PromptBuilder { #region Constructors public PromptBuilder() : this(CultureInfo.CurrentUICulture) { } public PromptBuilder(CultureInfo culture) { Helpers.ThrowIfNull(culture, nameof(culture)); if (culture.Equals(CultureInfo.InvariantCulture)) { throw new ArgumentException(SR.Get(SRID.InvariantCultureInfo), nameof(culture)); } _culture = culture; // Reset all value to default ClearContent(); } #endregion #region Public Methods // Use Append* naming convention. /// <summary> /// Clear the content of the prompt builder /// </summary> public void ClearContent() { _elements.Clear(); _elementStack.Push(new StackElement(SsmlElement.Lexicon | SsmlElement.Meta | SsmlElement.MetaData | SsmlElement.ParagraphOrSentence | SsmlElement.AudioMarkTextWithStyle, SsmlState.Header, _culture)); } /// <summary> /// Append Text to the SSML stream /// </summary> public void AppendText(string textToSpeak) { Helpers.ThrowIfNull(textToSpeak, nameof(textToSpeak)); // Validate that text can be added in this context ValidateElement(_elementStack.Peek(), SsmlElement.Text); _elements.Add(new Element(ElementType.Text, textToSpeak)); } public void AppendText(string textToSpeak, PromptRate rate) { Helpers.ThrowIfNull(textToSpeak, nameof(textToSpeak)); if (rate < PromptRate.NotSet || rate > PromptRate.ExtraSlow) { throw new ArgumentOutOfRangeException(nameof(rate)); } // Validate that text can be added in this context ValidateElement(_elementStack.Peek(), SsmlElement.Text); Element prosodyElement = new(ElementType.Prosody, textToSpeak); _elements.Add(prosodyElement); string sPromptRate = null; switch (rate) { case PromptRate.NotSet: break; case PromptRate.ExtraFast: sPromptRate = "x-fast"; break; case PromptRate.ExtraSlow: sPromptRate = "x-slow"; break; default: sPromptRate = rate.ToString().ToLowerInvariant(); break; } if (!string.IsNullOrEmpty(sPromptRate)) { prosodyElement._attributes = new Collection<AttributeItem>(); prosodyElement._attributes.Add(new AttributeItem("rate", sPromptRate)); } } public void AppendText(string textToSpeak, PromptVolume volume) { Helpers.ThrowIfNull(textToSpeak, nameof(textToSpeak)); if (volume < PromptVolume.NotSet || volume > PromptVolume.Default) { throw new ArgumentOutOfRangeException(nameof(volume)); } // Validate that text can be added in this context ValidateElement(_elementStack.Peek(), SsmlElement.Text); Element prosodyElement = new(ElementType.Prosody, textToSpeak); _elements.Add(prosodyElement); string sVolumeLevel = null; switch (volume) { // No volume do not set the attribute case PromptVolume.NotSet: break; case PromptVolume.ExtraSoft: sVolumeLevel = "x-soft"; break; case PromptVolume.ExtraLoud: sVolumeLevel = "x-loud"; break; default: sVolumeLevel = volume.ToString().ToLowerInvariant(); break; } if (!string.IsNullOrEmpty(sVolumeLevel)) { prosodyElement._attributes = new Collection<AttributeItem>(); prosodyElement._attributes.Add(new AttributeItem("volume", sVolumeLevel)); } } public void AppendText(string textToSpeak, PromptEmphasis emphasis) { Helpers.ThrowIfNull(textToSpeak, nameof(textToSpeak)); if (emphasis < PromptEmphasis.NotSet || emphasis > 
PromptEmphasis.Reduced) { throw new ArgumentOutOfRangeException(nameof(emphasis)); } // Validate that text can be added in this context ValidateElement(_elementStack.Peek(), SsmlElement.Text); Element emphasisElement = new(ElementType.Emphasis, textToSpeak); _elements.Add(emphasisElement); if (emphasis != PromptEmphasis.NotSet) { emphasisElement._attributes = new Collection<AttributeItem>(); emphasisElement._attributes.Add(new AttributeItem("level", emphasis.ToString().ToLowerInvariant())); } } public void StartStyle(PromptStyle style) { Helpers.ThrowIfNull(style, nameof(style)); // Validate that text can be added in this context StackElement stackElement = _elementStack.Peek(); ValidateElement(stackElement, SsmlElement.Prosody); // For emphasis or Prosody the list of possible elements that can be children is different. SsmlState ssmlState = 0; SsmlElement possibleChildren = stackElement._possibleChildren; _elements.Add(new Element(ElementType.StartStyle)); if (style.Emphasis != PromptEmphasis.NotSet) { Element emphasisElement = new(ElementType.Emphasis); _elements.Add(emphasisElement); emphasisElement._attributes = new Collection<AttributeItem>(); emphasisElement._attributes.Add(new AttributeItem("level", style.Emphasis.ToString().ToLowerInvariant())); // Set the expected children and mark the element used possibleChildren = SsmlElement.AudioMarkTextWithStyle; ssmlState = SsmlState.StyleEmphasis; } if (style.Rate != PromptRate.NotSet || style.Volume != PromptVolume.NotSet) { // two elements add a second start style if (ssmlState != 0) { _elements.Add(new Element(ElementType.StartStyle)); } Element prosodyElement = new(ElementType.Prosody); _elements.Add(prosodyElement); if (style.Rate != PromptRate.NotSet) { string sPromptRate; switch (style.Rate) { case PromptRate.ExtraFast: sPromptRate = "x-fast"; break; case PromptRate.ExtraSlow: sPromptRate = "x-slow"; break; default: sPromptRate = style.Rate.ToString().ToLowerInvariant(); break; } prosodyElement._attributes = new Collection<AttributeItem>(); prosodyElement._attributes.Add(new AttributeItem("rate", sPromptRate)); } if (style.Volume != PromptVolume.NotSet) { string sVolumeLevel; switch (style.Volume) { case PromptVolume.ExtraSoft: sVolumeLevel = "x-soft"; break; case PromptVolume.ExtraLoud: sVolumeLevel = "x-loud"; break; default: sVolumeLevel = style.Volume.ToString().ToLowerInvariant(); break; } if (prosodyElement._attributes == null) { prosodyElement._attributes = new Collection<AttributeItem>(); } prosodyElement._attributes.Add(new AttributeItem("volume", sVolumeLevel)); } // Set the expected children and mark the element used possibleChildren = SsmlElement.ParagraphOrSentence | SsmlElement.AudioMarkTextWithStyle; ssmlState |= SsmlState.StyleProsody; } _elementStack.Push(new StackElement(possibleChildren, ssmlState, stackElement._culture)); } public void EndStyle() { StackElement stackElement = _elementStack.Pop(); if (stackElement._state != 0) { if ((stackElement._state & (SsmlState.StyleEmphasis | SsmlState.StyleProsody)) == 0) { throw new InvalidOperationException(SR.Get(SRID.PromptBuilderMismatchStyle)); } _elements.Add(new Element(ElementType.EndStyle)); // Check if 2 xml elements have been created if (stackElement._state == (SsmlState.StyleEmphasis | SsmlState.StyleProsody)) { _elements.Add(new Element(ElementType.EndStyle)); } } } public void StartVoice(VoiceInfo voice) { Helpers.ThrowIfNull(voice, nameof(voice)); if (!VoiceInfo.ValidateGender(voice.Gender)) { throw new ArgumentException(SR.Get(SRID.EnumInvalid, 
"VoiceGender"), nameof(voice)); } if (!VoiceInfo.ValidateAge(voice.Age)) { throw new ArgumentException(SR.Get(SRID.EnumInvalid, "VoiceAge"), nameof(voice)); } StackElement stackElement = _elementStack.Peek(); ValidateElement(stackElement, SsmlElement.Voice); CultureInfo culture = voice.Culture == null ? stackElement._culture : voice.Culture; Element startVoice = new(ElementType.StartVoice); startVoice._attributes = new Collection<AttributeItem>(); _elements.Add(startVoice); if (!string.IsNullOrEmpty(voice.Name)) { startVoice._attributes.Add(new AttributeItem("name", voice.Name)); } if (voice.Culture != null) { startVoice._attributes.Add(new AttributeItem("xml", "lang", voice.Culture.Name)); } if (voice.Gender != VoiceGender.NotSet) { startVoice._attributes.Add(new AttributeItem("gender", voice.Gender.ToString().ToLowerInvariant())); } if (voice.Age != VoiceAge.NotSet) { startVoice._attributes.Add(new AttributeItem("age", ((int)voice.Age).ToString(CultureInfo.InvariantCulture))); } if (voice.Variant >= 0) { startVoice._attributes.Add(new AttributeItem("variant", voice.Variant.ToString(CultureInfo.InvariantCulture))); } _elementStack.Push(new StackElement(SsmlElement.Sentence | SsmlElement.AudioMarkTextWithStyle, SsmlState.Voice, culture)); } public void StartVoice(string name) { Helpers.ThrowIfEmptyOrNull(name, nameof(name)); StartVoice(new VoiceInfo(name)); } public void StartVoice(VoiceGender gender) { StartVoice(new VoiceInfo(gender)); } public void StartVoice(VoiceGender gender, VoiceAge age) { StartVoice(new VoiceInfo(gender, age)); } public void StartVoice(VoiceGender gender, VoiceAge age, int voiceAlternate) { StartVoice(new VoiceInfo(gender, age, voiceAlternate)); } public void StartVoice(CultureInfo culture) { StartVoice(new VoiceInfo(culture)); } public void EndVoice() { if (_elementStack.Pop()._state != SsmlState.Voice) { throw new InvalidOperationException(SR.Get(SRID.PromptBuilderMismatchVoice)); } _elements.Add(new Element(ElementType.EndVoice)); } // <paragraph>, <sentence> public void StartParagraph() { StartParagraph(null); } public void StartParagraph(CultureInfo culture) { // check for well formed document StackElement stackElement = _elementStack.Peek(); ValidateElement(stackElement, SsmlElement.Paragraph); Element startParagraph = new(ElementType.StartParagraph); _elements.Add(startParagraph); if (culture != null) { if (culture.Equals(CultureInfo.InvariantCulture)) { throw new ArgumentException(SR.Get(SRID.InvariantCultureInfo), nameof(culture)); } startParagraph._attributes = new Collection<AttributeItem>(); startParagraph._attributes.Add(new AttributeItem("xml", "lang", culture.Name)); } else { culture = stackElement._culture; } _elementStack.Push(new StackElement(SsmlElement.AudioMarkTextWithStyle | SsmlElement.Sentence, SsmlState.Paragraph, culture)); } public void EndParagraph() { if (_elementStack.Pop()._state != SsmlState.Paragraph) { throw new InvalidOperationException(SR.Get(SRID.PromptBuilderMismatchParagraph)); } _elements.Add(new Element(ElementType.EndParagraph)); } public void StartSentence() { StartSentence(null); } public void StartSentence(CultureInfo culture) { // check for well formed document StackElement stackElement = _elementStack.Peek(); ValidateElement(stackElement, SsmlElement.Sentence); Element startSentence = new(ElementType.StartSentence); _elements.Add(startSentence); if (culture != null) { if (culture.Equals(CultureInfo.InvariantCulture)) { throw new ArgumentException(SR.Get(SRID.InvariantCultureInfo), nameof(culture)); } 
startSentence._attributes = new Collection<AttributeItem>(); startSentence._attributes.Add(new AttributeItem("xml", "lang", culture.Name)); } else { culture = stackElement._culture; } _elementStack.Push(new StackElement(SsmlElement.AudioMarkTextWithStyle, SsmlState.Sentence, culture)); } public void EndSentence() { if (_elementStack.Pop()._state != SsmlState.Sentence) { throw new InvalidOperationException(SR.Get(SRID.PromptBuilderMismatchSentence)); } _elements.Add(new Element(ElementType.EndSentence)); } public void AppendTextWithHint(string textToSpeak, SayAs sayAs) { Helpers.ThrowIfNull(textToSpeak, nameof(textToSpeak)); if (sayAs < SayAs.SpellOut || sayAs > SayAs.Text) { throw new ArgumentOutOfRangeException(nameof(sayAs)); } // check for well formed document ValidateElement(_elementStack.Peek(), SsmlElement.Text); if (sayAs != SayAs.Text) { Element sayAsElement = new(ElementType.SayAs, textToSpeak); _elements.Add(sayAsElement); sayAsElement._attributes = new Collection<AttributeItem>(); string sInterpretAs = null; string sFormat = null; switch (sayAs) { case SayAs.SpellOut: sInterpretAs = "characters"; break; case SayAs.NumberOrdinal: sInterpretAs = "ordinal"; break; case SayAs.NumberCardinal: sInterpretAs = "cardinal"; break; case SayAs.Date: sInterpretAs = "date"; break; case SayAs.DayMonthYear: sInterpretAs = "date"; sFormat = "dmy"; break; case SayAs.MonthDayYear: sInterpretAs = "date"; sFormat = "mdy"; break; case SayAs.YearMonthDay: sInterpretAs = "date"; sFormat = "ymd"; break; case SayAs.YearMonth: sInterpretAs = "date"; sFormat = "ym"; break; case SayAs.MonthYear: sInterpretAs = "date"; sFormat = "my"; break; case SayAs.MonthDay: sInterpretAs = "date"; sFormat = "md"; break; case SayAs.DayMonth: sInterpretAs = "date"; sFormat = "dm"; break; case SayAs.Year: sInterpretAs = "date"; sFormat = "y"; break; case SayAs.Month: sInterpretAs = "date"; sFormat = "m"; break; case SayAs.Day: sInterpretAs = "date"; sFormat = "d"; break; case SayAs.Time: sInterpretAs = "time"; break; case SayAs.Time24: sInterpretAs = "time"; sFormat = "hms24"; break; case SayAs.Time12: sInterpretAs = "time"; sFormat = "hms12"; break; case SayAs.Telephone: sInterpretAs = "telephone"; break; } sayAsElement._attributes.Add(new AttributeItem("interpret-as", sInterpretAs)); if (!string.IsNullOrEmpty(sFormat)) { sayAsElement._attributes.Add(new AttributeItem("format", sFormat)); } } else { AppendText(textToSpeak); } } public void AppendTextWithHint(string textToSpeak, string sayAs) { Helpers.ThrowIfNull(textToSpeak, nameof(textToSpeak)); Helpers.ThrowIfEmptyOrNull(sayAs, nameof(sayAs)); // check for well formed document ValidateElement(_elementStack.Peek(), SsmlElement.Text); Element sayAsElement = new(ElementType.SayAs, textToSpeak); _elements.Add(sayAsElement); sayAsElement._attributes = new Collection<AttributeItem>(); sayAsElement._attributes.Add(new AttributeItem("interpret-as", sayAs)); } public void AppendTextWithPronunciation(string textToSpeak, string pronunciation) { Helpers.ThrowIfEmptyOrNull(textToSpeak, nameof(textToSpeak)); Helpers.ThrowIfEmptyOrNull(pronunciation, nameof(pronunciation)); // check for well formed document ValidateElement(_elementStack.Peek(), SsmlElement.Text); // validate the pronunciation PhonemeConverter.ValidateUpsIds(pronunciation); Element phoneElement = new(ElementType.Phoneme, textToSpeak); _elements.Add(phoneElement); phoneElement._attributes = new Collection<AttributeItem>(); phoneElement._attributes.Add(new AttributeItem("ph", pronunciation)); } public void 
AppendTextWithAlias(string textToSpeak, string substitute) { Helpers.ThrowIfNull(textToSpeak, nameof(textToSpeak)); Helpers.ThrowIfNull(substitute, nameof(substitute)); // check for well formed document ValidateElement(_elementStack.Peek(), SsmlElement.Text); Element subElement = new(ElementType.Sub, textToSpeak); _elements.Add(subElement); subElement._attributes = new Collection<AttributeItem>(); subElement._attributes.Add(new AttributeItem("alias", substitute)); } public void AppendBreak() { // check for well formed document ValidateElement(_elementStack.Peek(), SsmlElement.Break); _elements.Add(new Element(ElementType.Break)); } public void AppendBreak(PromptBreak strength) { // check for well formed document ValidateElement(_elementStack.Peek(), SsmlElement.Break); Element breakElement = new(ElementType.Break); _elements.Add(breakElement); string sBreak = null; switch (strength) { case PromptBreak.None: sBreak = "none"; break; case PromptBreak.ExtraSmall: sBreak = "x-weak"; break; case PromptBreak.Small: sBreak = "weak"; break; case PromptBreak.Medium: sBreak = "medium"; break; case PromptBreak.Large: sBreak = "strong"; break; case PromptBreak.ExtraLarge: sBreak = "x-strong"; break; default: throw new ArgumentNullException(nameof(strength)); } breakElement._attributes = new Collection<AttributeItem>(); breakElement._attributes.Add(new AttributeItem("strength", sBreak)); } public void AppendBreak(TimeSpan duration) { // check for well formed document ValidateElement(_elementStack.Peek(), SsmlElement.Break); if (duration.Ticks < 0) { throw new ArgumentOutOfRangeException(nameof(duration)); } Element breakElement = new(ElementType.Break); _elements.Add(breakElement); breakElement._attributes = new Collection<AttributeItem>(); breakElement._attributes.Add(new AttributeItem("time", duration.TotalMilliseconds + "ms")); } // <audio> public void AppendAudio(string path) { Helpers.ThrowIfEmptyOrNull(path, nameof(path)); Uri uri; try { uri = new Uri(path, UriKind.RelativeOrAbsolute); } catch (UriFormatException e) { throw new ArgumentException(e.Message, path, e); } // check for well formed document ValidateElement(_elementStack.Peek(), SsmlElement.Audio); AppendAudio(uri); } public void AppendAudio(Uri audioFile) { Helpers.ThrowIfNull(audioFile, nameof(audioFile)); // check for well formed document ValidateElement(_elementStack.Peek(), SsmlElement.Audio); Element audioElement = new(ElementType.Audio); _elements.Add(audioElement); audioElement._attributes = new Collection<AttributeItem>(); audioElement._attributes.Add(new AttributeItem("src", audioFile.ToString())); } public void AppendAudio(Uri audioFile, string alternateText) { Helpers.ThrowIfNull(audioFile, nameof(audioFile)); Helpers.ThrowIfNull(alternateText, nameof(alternateText)); // check for well formed document ValidateElement(_elementStack.Peek(), SsmlElement.Audio); Element audioElement = new(ElementType.Audio, alternateText); _elements.Add(audioElement); audioElement._attributes = new Collection<AttributeItem>(); audioElement._attributes.Add(new AttributeItem("src", audioFile.ToString())); } // <mark> public void AppendBookmark(string bookmarkName) { Helpers.ThrowIfEmptyOrNull(bookmarkName, nameof(bookmarkName)); // check for well formed document ValidateElement(_elementStack.Peek(), SsmlElement.Mark); Element bookmarkElement = new(ElementType.Bookmark); _elements.Add(bookmarkElement); bookmarkElement._attributes = new Collection<AttributeItem>(); bookmarkElement._attributes.Add(new AttributeItem("name", bookmarkName)); } public 
void AppendPromptBuilder(PromptBuilder promptBuilder) { Helpers.ThrowIfNull(promptBuilder, nameof(promptBuilder)); StringReader sr = new(promptBuilder.ToXml()); XmlTextReader reader = new(sr); AppendSsml(reader); reader.Close(); sr.Close(); } public void AppendSsml(string path) { Helpers.ThrowIfEmptyOrNull(path, nameof(path)); AppendSsml(new Uri(path, UriKind.Relative)); } public void AppendSsml(Uri ssmlFile) { Helpers.ThrowIfNull(ssmlFile, nameof(ssmlFile)); string localFile; Uri redirectUri; using (Stream stream = s_resourceLoader.LoadFile(ssmlFile, out localFile, out redirectUri)) { try { AppendSsml(new XmlTextReader(stream)); } finally { s_resourceLoader.UnloadFile(localFile); } } } public void AppendSsml(XmlReader ssmlFile) { Helpers.ThrowIfNull(ssmlFile, nameof(ssmlFile)); AppendSsmlInternal(ssmlFile); } // Advanced: Extensibility model to write through to the underlying stream writer. [EditorBrowsable(EditorBrowsableState.Never)] public void AppendSsmlMarkup(string ssmlMarkup) { Helpers.ThrowIfEmptyOrNull(ssmlMarkup, nameof(ssmlMarkup)); _elements.Add(new Element(ElementType.SsmlMarkup, ssmlMarkup)); } public string ToXml() { using (StringWriter sw = new(CultureInfo.InvariantCulture)) { using (XmlTextWriter writer = new(sw)) { WriteXml(writer); SsmlState state = _elementStack.Peek()._state; if (state != SsmlState.Header) { string sMsg = SR.Get(SRID.PromptBuilderInvalideState); switch (state) { case SsmlState.Ended: sMsg += SR.Get(SRID.PromptBuilderStateEnded); break; case SsmlState.Sentence: sMsg += SR.Get(SRID.PromptBuilderStateSentence); break; case SsmlState.Paragraph: sMsg += SR.Get(SRID.PromptBuilderStateParagraph); break; case SsmlState.StyleEmphasis: case SsmlState.StyleProsody: case (SsmlState.StyleProsody | SsmlState.StyleEmphasis): sMsg += SR.Get(SRID.PromptBuilderStateStyle); break; case SsmlState.Voice: sMsg += SR.Get(SRID.PromptBuilderStateVoice); break; default: System.Diagnostics.Debug.Assert(false); throw new NotSupportedException(); } throw new InvalidOperationException(sMsg); } return sw.ToString(); } } } #endregion #region public Properties public bool IsEmpty { get { return _elements.Count == 0; } } public CultureInfo Culture { get { return _culture; } set { if (value == null) { throw new ArgumentNullException(nameof(value)); } _culture = value; } } #endregion #region Internal Enums internal enum SsmlState { Header = 1, Paragraph = 2, Sentence = 4, StyleEmphasis = 8, StyleProsody = 16, Voice = 32, Ended = 64 } #endregion #region Protected Methods #endregion #region Private Methods private void WriteXml(XmlTextWriter writer) { writer.WriteStartElement("speak"); // Add the required elements. 
writer.WriteAttributeString("version", "1.0"); writer.WriteAttributeString("xmlns", _xmlnsDefault); writer.WriteAttributeString("xml", "lang", null, _culture.Name); bool noEndElement = false; foreach (Element element in _elements) { noEndElement = noEndElement || element._type == ElementType.StartSentence || element._type == ElementType.StartParagraph || element._type == ElementType.StartStyle || element._type == ElementType.StartVoice; switch (element._type) { case ElementType.Text: writer.WriteString(element._text); break; case ElementType.SsmlMarkup: writer.WriteRaw(element._text); break; case ElementType.StartVoice: case ElementType.StartParagraph: case ElementType.StartSentence: case ElementType.Audio: case ElementType.Break: case ElementType.Bookmark: case ElementType.Emphasis: case ElementType.Phoneme: case ElementType.Prosody: case ElementType.SayAs: case ElementType.Sub: writer.WriteStartElement(s_promptBuilderElementName[(int)element._type]); // Write the attributes if any if (element._attributes != null) { foreach (AttributeItem attribute in element._attributes) { if (attribute._namespace == null) { writer.WriteAttributeString(attribute._key, attribute._value); } else { writer.WriteAttributeString(attribute._namespace, attribute._key, null, attribute._value); } } } // Write the text if any if (element._text != null) { writer.WriteString(element._text); } // Close the element unless it should wait if (!noEndElement) { writer.WriteEndElement(); } noEndElement = false; break; // Ignore just set the bool to not close the element case ElementType.StartStyle: break; // Close the current element case ElementType.EndStyle: case ElementType.EndVoice: case ElementType.EndParagraph: case ElementType.EndSentence: writer.WriteEndElement(); break; default: throw new NotSupportedException(); } } writer.WriteEndElement(); } /// <summary> /// Ensure the this element is properly placed in the SSML markup /// </summary> private static void ValidateElement(StackElement stackElement, SsmlElement currentElement) { if ((stackElement._possibleChildren & currentElement) == 0) { throw new InvalidOperationException(string.Format(CultureInfo.InvariantCulture, SR.Get(SRID.PromptBuilderInvalidElement), currentElement.ToString(), stackElement._state.ToString())); } } private void AppendSsmlInternal(XmlReader ssmlFile) { // check for well formed document StackElement stackElement = _elementStack.Peek(); ValidateElement(_elementStack.Peek(), SsmlElement.Voice); using (StringWriter sw = new(CultureInfo.InvariantCulture)) { using (XmlTextWriter writer = new(sw)) { TextWriterEngine engine = new(writer, stackElement._culture); SsmlParser.Parse(ssmlFile, engine, null); } _elements.Add(new Element(ElementType.SsmlMarkup, sw.ToString())); } } #endregion #region Private Fields // Stack of elements for the SSML document private Stack<StackElement> _elementStack = new(); // <xml:lang> private CultureInfo _culture; // list of all the elements for this prompt builder private List<Element> _elements = new(); // Resource loader for the prompt builder private static ResourceLoader s_resourceLoader = new(); private const string _xmlnsDefault = @"http://www.w3.org/2001/10/synthesis"; #endregion #region Private Type [Serializable] private struct StackElement { internal SsmlElement _possibleChildren; internal SsmlState _state; internal CultureInfo _culture; internal StackElement(SsmlElement possibleChildren, SsmlState state, CultureInfo culture) { _possibleChildren = possibleChildren; _state = state; _culture = culture; } } 
private enum ElementType { Prosody, Emphasis, SayAs, Phoneme, Sub, Break, Audio, Bookmark, StartVoice, StartParagraph, StartSentence, EndSentence, EndParagraph, StartStyle, EndStyle, EndVoice, Text, SsmlMarkup } private static readonly string[] s_promptBuilderElementName = new string[] { "prosody", "emphasis", "say-as", "phoneme", "sub", "break", "audio", "mark", "voice", "p", "s" }; [Serializable] private struct AttributeItem { internal string _key; internal string _value; internal string _namespace; internal AttributeItem(string key, string value) { _key = key; _value = value; _namespace = null; } internal AttributeItem(string ns, string key, string value) : this(key, value) { _namespace = ns; } } [Serializable] private sealed class Element { internal ElementType _type; internal string _text; internal Collection<AttributeItem> _attributes; internal Element(ElementType type) { _type = type; } internal Element(ElementType type, string text) : this(type) { _text = text; } } #endregion } }
-1
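The two content fields above reproduce `PromptBuilder` from `System.Speech.Synthesis` in full. As a quick orientation to that API, here is a minimal usage sketch, not taken from the record itself: it calls only members that appear in the source above (`AppendText`, `AppendTextWithHint`, `AppendBreak`, `StartVoice`/`EndVoice`, `ToXml`), while the prompt text, voice choice, and console scaffolding are illustrative assumptions.

```csharp
using System;
using System.Speech.Synthesis;

class PromptBuilderSketch
{
    static void Main()
    {
        // Build a small SSML prompt using only members defined in the source above.
        PromptBuilder builder = new PromptBuilder();          // defaults to CultureInfo.CurrentUICulture
        builder.StartVoice(VoiceGender.Female);               // opens a <voice> element
        builder.AppendText("The meeting starts at ");
        builder.AppendTextWithHint("10:30", SayAs.Time);      // emits <say-as interpret-as="time">
        builder.AppendBreak(TimeSpan.FromMilliseconds(250));  // emits <break time="250ms">
        builder.AppendText("in the large conference room.");
        builder.EndVoice();                                   // must balance StartVoice before ToXml()

        Console.WriteLine(builder.ToXml());                   // the serialized <speak> document
    }
}
```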
dotnet/runtime
66,477
Add armv8l virtualized 32-bit ARM core detection
## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse the machine type as `arm` when the output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812); Arm support for source-build is at https://github.com/dotnet/installer/pull/13378. Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782.
ayakael
2022-03-10T21:05:11Z
2022-03-11T09:29:12Z
f82fe1d83f1a0e22025e186fdc6f4d86de0a83a1
12816261feee53cc57fab17f088ee6544ef6ffdb
Add armv8l virtualized 32-bit ARM core detection. ## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse the machine type as `arm` when the output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812); Arm support for source-build is at https://github.com/dotnet/installer/pull/13378. Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782.
./src/tests/CoreMangLib/system/multicastdelegate/MulticastDelegateEquals.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <AllowUnsafeBlocks>true</AllowUnsafeBlocks> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <ItemGroup> <Compile Include="delegatedefinitions.cs" /> <Compile Include="multicastdelegateequals.cs" /> <Compile Include="verificationagent.cs" /> </ItemGroup> <ItemGroup> <ProjectReference Include="$(TestSourceDir)Common/CoreCLRTestLibrary/CoreCLRTestLibrary.csproj" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <AllowUnsafeBlocks>true</AllowUnsafeBlocks> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <ItemGroup> <Compile Include="delegatedefinitions.cs" /> <Compile Include="multicastdelegateequals.cs" /> <Compile Include="verificationagent.cs" /> </ItemGroup> <ItemGroup> <ProjectReference Include="$(TestSourceDir)Common/CoreCLRTestLibrary/CoreCLRTestLibrary.csproj" /> </ItemGroup> </Project>
-1
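The PR described in the record above changes the runtime's native architecture-detection logic in its build scripts, which is not itself part of this record. Purely to illustrate the mapping the description proposes, treating `armv8l` the same way as `armv7l`, here is a small hypothetical C# sketch; the helper name `MapUnameMachine` and any mappings beyond the two ARM values named in the PR are assumptions, not code from the PR.

```csharp
using System;
using System.Diagnostics;

static class MachineArchSketch
{
    // Hypothetical helper: map `uname -m` output to a RID-style architecture name,
    // treating the 32-bit ARM userspace values (including armv8l) alike.
    static string MapUnameMachine(string unameM) => unameM switch
    {
        "armv6l" or "armv7l" or "armv8l" => "arm",
        "aarch64" or "arm64"             => "arm64",
        "x86_64" or "amd64"              => "x64",
        _                                => unameM, // pass anything unrecognized through unchanged
    };

    static void Main()
    {
        // Unix-only illustration: run `uname -m` and print the mapped architecture.
        var psi = new ProcessStartInfo("uname", "-m")
        {
            RedirectStandardOutput = true,
            UseShellExecute = false,
        };
        using Process proc = Process.Start(psi)!;
        string machine = proc.StandardOutput.ReadToEnd().Trim();
        proc.WaitForExit();
        Console.WriteLine($"{machine} -> {MapUnameMachine(machine)}");
    }
}
```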
dotnet/runtime
66,477
Add armv8l virtualized 32-bit ARM core detection
## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse the machine type as `arm` when the output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812); Arm support for source-build is at https://github.com/dotnet/installer/pull/13378. Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782.
ayakael
2022-03-10T21:05:11Z
2022-03-11T09:29:12Z
f82fe1d83f1a0e22025e186fdc6f4d86de0a83a1
12816261feee53cc57fab17f088ee6544ef6ffdb
Add armv8l virtualized 32-bit ARM core detection. ## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse the machine type as `arm` when the output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812); Arm support for source-build is at https://github.com/dotnet/installer/pull/13378. Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782.
./src/libraries/Common/src/Interop/Unix/System.Native/Interop.MapTcpState.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Net.NetworkInformation; using System.Runtime.InteropServices; internal static partial class Interop { internal static partial class Sys { [LibraryImport(Libraries.SystemNative, EntryPoint = "SystemNative_MapTcpState")] [SuppressGCTransition] internal static partial TcpState MapTcpState(int nativeState); } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Net.NetworkInformation; using System.Runtime.InteropServices; internal static partial class Interop { internal static partial class Sys { [LibraryImport(Libraries.SystemNative, EntryPoint = "SystemNative_MapTcpState")] [SuppressGCTransition] internal static partial TcpState MapTcpState(int nativeState); } }
-1
dotnet/runtime
66,477
Add armv8l virtualized 32-bit ARM core detection
## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse the machine type as `arm` when the output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812); Arm support for source-build is at https://github.com/dotnet/installer/pull/13378. Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782.
ayakael
2022-03-10T21:05:11Z
2022-03-11T09:29:12Z
f82fe1d83f1a0e22025e186fdc6f4d86de0a83a1
12816261feee53cc57fab17f088ee6544ef6ffdb
Add armv8l virtualized 32-bit ARM core detection. ## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse the machine type as `arm` when the output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812); Arm support for source-build is at https://github.com/dotnet/installer/pull/13378. Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782.
./src/libraries/System.Security.AccessControl/tests/DiscretionaryAcl/DiscretionaryAcl_Constructor1.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Linq; using System.Security.Principal; using Xunit; namespace System.Security.AccessControl.Tests { /// <summary> /// Constructor1 /// </summary> public partial class DiscretionaryAcl_Constructor1 { public static IEnumerable<object[]> DiscretionaryACL_Constructor1() { yield return new object[] { false, false, 0 }; yield return new object[] { false, true, 0 }; yield return new object[] { true, false, 0 }; yield return new object[] { true, true, 0 }; yield return new object[] { false, false, 1 }; yield return new object[] { false, true, 1 }; yield return new object[] { true, false, 1 }; yield return new object[] { true, true, 1 }; } [Theory] [MemberData(nameof(DiscretionaryACL_Constructor1))] public static bool Constructor1(bool isContainer, bool isDS, int capacity) { bool result = true; byte[] dAclBinaryForm = null; byte[] rAclBinaryForm = null; RawAcl rawAcl = null; DiscretionaryAcl discretionaryAcl = null; discretionaryAcl = new DiscretionaryAcl(isContainer, isDS, capacity); rawAcl = new RawAcl(isDS ? GenericAcl.AclRevisionDS : GenericAcl.AclRevision, capacity); if (isContainer == discretionaryAcl.IsContainer && isDS == discretionaryAcl.IsDS && (isDS ? GenericAcl.AclRevisionDS : GenericAcl.AclRevision) == discretionaryAcl.Revision && 0 == discretionaryAcl.Count && 8 == discretionaryAcl.BinaryLength && true == discretionaryAcl.IsCanonical) { dAclBinaryForm = new byte[discretionaryAcl.BinaryLength]; rAclBinaryForm = new byte[rawAcl.BinaryLength]; discretionaryAcl.GetBinaryForm(dAclBinaryForm, 0); rawAcl.GetBinaryForm(rAclBinaryForm, 0); if (!Utils.IsBinaryFormEqual(dAclBinaryForm, rAclBinaryForm)) result = false; //redundant index check for (int i = 0; i < discretionaryAcl.Count; i++) { if (!Utils.IsAceEqual(discretionaryAcl[i], rawAcl[i])) { result = false; break; } } } else { result = false; } Assert.True(result); return result; } [Fact] public static void Constructor1_NegativeCapacity() { Assert.Throws<ArgumentOutOfRangeException>(() => new DiscretionaryAcl(false, false, -1)); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Linq; using System.Security.Principal; using Xunit; namespace System.Security.AccessControl.Tests { /// <summary> /// Constructor1 /// </summary> public partial class DiscretionaryAcl_Constructor1 { public static IEnumerable<object[]> DiscretionaryACL_Constructor1() { yield return new object[] { false, false, 0 }; yield return new object[] { false, true, 0 }; yield return new object[] { true, false, 0 }; yield return new object[] { true, true, 0 }; yield return new object[] { false, false, 1 }; yield return new object[] { false, true, 1 }; yield return new object[] { true, false, 1 }; yield return new object[] { true, true, 1 }; } [Theory] [MemberData(nameof(DiscretionaryACL_Constructor1))] public static bool Constructor1(bool isContainer, bool isDS, int capacity) { bool result = true; byte[] dAclBinaryForm = null; byte[] rAclBinaryForm = null; RawAcl rawAcl = null; DiscretionaryAcl discretionaryAcl = null; discretionaryAcl = new DiscretionaryAcl(isContainer, isDS, capacity); rawAcl = new RawAcl(isDS ? GenericAcl.AclRevisionDS : GenericAcl.AclRevision, capacity); if (isContainer == discretionaryAcl.IsContainer && isDS == discretionaryAcl.IsDS && (isDS ? GenericAcl.AclRevisionDS : GenericAcl.AclRevision) == discretionaryAcl.Revision && 0 == discretionaryAcl.Count && 8 == discretionaryAcl.BinaryLength && true == discretionaryAcl.IsCanonical) { dAclBinaryForm = new byte[discretionaryAcl.BinaryLength]; rAclBinaryForm = new byte[rawAcl.BinaryLength]; discretionaryAcl.GetBinaryForm(dAclBinaryForm, 0); rawAcl.GetBinaryForm(rAclBinaryForm, 0); if (!Utils.IsBinaryFormEqual(dAclBinaryForm, rAclBinaryForm)) result = false; //redundant index check for (int i = 0; i < discretionaryAcl.Count; i++) { if (!Utils.IsAceEqual(discretionaryAcl[i], rawAcl[i])) { result = false; break; } } } else { result = false; } Assert.True(result); return result; } [Fact] public static void Constructor1_NegativeCapacity() { Assert.Throws<ArgumentOutOfRangeException>(() => new DiscretionaryAcl(false, false, -1)); } } }
-1
dotnet/runtime
66,477
Add armv8l virtualized 32-bit ARM core detection
## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse the machine type as `arm` when the output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812); Arm support for source-build is at https://github.com/dotnet/installer/pull/13378. Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782.
ayakael
2022-03-10T21:05:11Z
2022-03-11T09:29:12Z
f82fe1d83f1a0e22025e186fdc6f4d86de0a83a1
12816261feee53cc57fab17f088ee6544ef6ffdb
Add armv8l virtualized 32-bit ARM core detection. ## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse the machine type as `arm` when the output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812); Arm support for source-build is at https://github.com/dotnet/installer/pull/13378. Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782.
./src/libraries/System.Net.Http/src/System/Net/Http/Headers/Int64NumberHeaderParser.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Diagnostics; using System.Globalization; namespace System.Net.Http.Headers { internal sealed class Int64NumberHeaderParser : BaseHeaderParser { // Note that we don't need a custom comparer even though we have a value type that gets boxed (comparing two // equal boxed value types returns 'false' since the object instances used for boxing the two values are // different). The reason is that the comparer is only used by HttpHeaders when comparing values in a collection. // Value types are never used in collections (in fact HttpHeaderValueCollection expects T to be a reference // type). internal static readonly Int64NumberHeaderParser Parser = new Int64NumberHeaderParser(); private Int64NumberHeaderParser() : base(false) { } public override string ToString(object value) { Debug.Assert(value is long); return ((long)value).ToString(NumberFormatInfo.InvariantInfo); } protected override int GetParsedValueLength(string value, int startIndex, object? storeValue, out object? parsedValue) { parsedValue = null; int numberLength = HttpRuleParser.GetNumberLength(value, startIndex, false); if ((numberLength == 0) || (numberLength > HttpRuleParser.MaxInt64Digits)) { return 0; } long result; if (!HeaderUtilities.TryParseInt64(value, startIndex, numberLength, out result)) { return 0; } parsedValue = result; return numberLength; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Diagnostics; using System.Globalization; namespace System.Net.Http.Headers { internal sealed class Int64NumberHeaderParser : BaseHeaderParser { // Note that we don't need a custom comparer even though we have a value type that gets boxed (comparing two // equal boxed value types returns 'false' since the object instances used for boxing the two values are // different). The reason is that the comparer is only used by HttpHeaders when comparing values in a collection. // Value types are never used in collections (in fact HttpHeaderValueCollection expects T to be a reference // type). internal static readonly Int64NumberHeaderParser Parser = new Int64NumberHeaderParser(); private Int64NumberHeaderParser() : base(false) { } public override string ToString(object value) { Debug.Assert(value is long); return ((long)value).ToString(NumberFormatInfo.InvariantInfo); } protected override int GetParsedValueLength(string value, int startIndex, object? storeValue, out object? parsedValue) { parsedValue = null; int numberLength = HttpRuleParser.GetNumberLength(value, startIndex, false); if ((numberLength == 0) || (numberLength > HttpRuleParser.MaxInt64Digits)) { return 0; } long result; if (!HeaderUtilities.TryParseInt64(value, startIndex, numberLength, out result)) { return 0; } parsedValue = result; return numberLength; } } }
-1
dotnet/runtime
66,477
Add armv8l virtualized 32-bit ARM core detection
## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse the machine type as `arm` when the output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812); Arm support for source-build is at https://github.com/dotnet/installer/pull/13378. Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782.
ayakael
2022-03-10T21:05:11Z
2022-03-11T09:29:12Z
f82fe1d83f1a0e22025e186fdc6f4d86de0a83a1
12816261feee53cc57fab17f088ee6544ef6ffdb
Add armv8l virtualized 32-bit ARM core detection. ## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse the machine type as `arm` when the output of `uname -m` is `armv8l`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812); Arm support for source-build is at https://github.com/dotnet/installer/pull/13378. Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782.
./src/coreclr/tools/r2rtest/Buckets.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.CommandLine; using System.Diagnostics; using System.IO; using System.Linq; namespace R2RTest { public class Buckets { private Dictionary<string, List<ProcessInfo>> _bucketMap; public Buckets() { _bucketMap = new Dictionary<string, List<ProcessInfo>>(StringComparer.OrdinalIgnoreCase); } public void AddCompilation(ProcessInfo process) => Add(AnalyzeCompilationFailure(process), process); public void AddExecution(ProcessInfo process) => Add(AnalyzeExecutionFailure(process), process); public void Add(string bucket, ProcessInfo process) { List<ProcessInfo> processes; if (!_bucketMap.TryGetValue(bucket, out processes)) { processes = new List<ProcessInfo>(); _bucketMap.Add(bucket, processes); } processes.Add(process); } public void WriteToFile(string outputFile, bool detailed) { using (StreamWriter outputStream = new StreamWriter(outputFile)) { WriteToStream(outputStream, detailed); } } public void WriteToStream(StreamWriter output, bool detailed) { output.WriteLine($@"#buckets: {_bucketMap.Count}, #failures: {_bucketMap.Sum(b => b.Value.Count)}"); if (_bucketMap.Count == 0) { // No bucketing info to display return; } IEnumerable<KeyValuePair<string, List<ProcessInfo>>> orderedBuckets = _bucketMap.OrderByDescending(bucket => bucket.Value.Count); foreach (KeyValuePair<string, List<ProcessInfo>> bucketKvp in orderedBuckets) { bucketKvp.Value.Sort((a, b) => a.Parameters.OutputFileName.CompareTo(b.Parameters.OutputFileName)); output.WriteLine($@" [{bucketKvp.Value.Count} failures] {bucketKvp.Key}"); } output.WriteLine(); output.WriteLine("Detailed bucket info:"); foreach (KeyValuePair<string, List<ProcessInfo>> bucketKvp in orderedBuckets) { output.WriteLine(""); output.WriteLine($@"Bucket name: {bucketKvp.Key}"); output.WriteLine($@"Failing tests ({bucketKvp.Value.Count} total):"); foreach (ProcessInfo failure in bucketKvp.Value) { output.WriteLine($@" {failure.Parameters.OutputFileName}"); } if (detailed) { output.WriteLine(); output.WriteLine($@"Detailed test failures:"); foreach (ProcessInfo failure in bucketKvp.Value) { output.WriteLine($@"Test: {failure.Parameters.OutputFileName}"); try { output.WriteLine(File.ReadAllText(failure.Parameters.LogPath)); } catch (Exception ex) { output.WriteLine($"Error reading file {failure.Parameters.LogPath}: {ex.Message}"); } output.WriteLine(); } } } } private static string AnalyzeCompilationFailure(ProcessInfo process) { try { if (process.TimedOut) { return "Timed out"; } string[] lines = File.ReadAllLines(process.Parameters.LogPath); for (int lineIndex = 2; lineIndex < lines.Length; lineIndex++) { string line = lines[lineIndex]; if (line.Length == 0 || line.StartsWith("EXEC : warning") || line.StartsWith("To repro,") || line.StartsWith("Emitting R2R PE file") || line.StartsWith("Moving R2R PE file") || line.StartsWith("Warning: ") || line.StartsWith("Info: ") || line == "Assertion Failed") { continue; } return line; } return string.Join("; ", lines); } catch (Exception ex) { return ex.Message; } } private static string AnalyzeExecutionFailure(ProcessInfo process) { try { if (process.TimedOut) { return "Timed out"; } string[] lines = File.ReadAllLines(process.Parameters.LogPath); for (int lineIndex = 0; lineIndex < lines.Length; lineIndex++) { string line = lines[lineIndex]; if (line.StartsWith("Assert failure")) { int openParen = line.IndexOf('('); int closeParen 
= line.IndexOf(')', openParen + 1); if (openParen > 0 && closeParen > openParen) { line = line.Substring(0, openParen) + line.Substring(closeParen + 1); } return line; } else if (line.StartsWith("Unhandled exception", StringComparison.OrdinalIgnoreCase)) { int leftBracket = line.IndexOf('['); int rightBracket = line.IndexOf(']', leftBracket + 1); if (leftBracket >= 0 && rightBracket > leftBracket) { line = line.Substring(0, leftBracket) + line.Substring(rightBracket + 1); } for (int detailLineIndex = lineIndex + 1; detailLineIndex < lines.Length; detailLineIndex++) { string detailLine = lines[detailLineIndex].TrimStart(); if (!detailLine.StartsWith("--->")) { break; } line += " " + detailLine; } return line; } else if (line.StartsWith("Fatal error", StringComparison.OrdinalIgnoreCase)) { if (lineIndex + 1 < lines.Length && lines[lineIndex + 1].TrimStart().StartsWith("at ")) { line += lines[lineIndex + 1]; } return line; } } return $"Exit code: {process.ExitCode} = 0x{process.ExitCode:X8}, expected {process.Parameters.ExpectedExitCode}"; } catch (Exception ex) { return ex.Message; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.CommandLine; using System.Diagnostics; using System.IO; using System.Linq; namespace R2RTest { public class Buckets { private Dictionary<string, List<ProcessInfo>> _bucketMap; public Buckets() { _bucketMap = new Dictionary<string, List<ProcessInfo>>(StringComparer.OrdinalIgnoreCase); } public void AddCompilation(ProcessInfo process) => Add(AnalyzeCompilationFailure(process), process); public void AddExecution(ProcessInfo process) => Add(AnalyzeExecutionFailure(process), process); public void Add(string bucket, ProcessInfo process) { List<ProcessInfo> processes; if (!_bucketMap.TryGetValue(bucket, out processes)) { processes = new List<ProcessInfo>(); _bucketMap.Add(bucket, processes); } processes.Add(process); } public void WriteToFile(string outputFile, bool detailed) { using (StreamWriter outputStream = new StreamWriter(outputFile)) { WriteToStream(outputStream, detailed); } } public void WriteToStream(StreamWriter output, bool detailed) { output.WriteLine($@"#buckets: {_bucketMap.Count}, #failures: {_bucketMap.Sum(b => b.Value.Count)}"); if (_bucketMap.Count == 0) { // No bucketing info to display return; } IEnumerable<KeyValuePair<string, List<ProcessInfo>>> orderedBuckets = _bucketMap.OrderByDescending(bucket => bucket.Value.Count); foreach (KeyValuePair<string, List<ProcessInfo>> bucketKvp in orderedBuckets) { bucketKvp.Value.Sort((a, b) => a.Parameters.OutputFileName.CompareTo(b.Parameters.OutputFileName)); output.WriteLine($@" [{bucketKvp.Value.Count} failures] {bucketKvp.Key}"); } output.WriteLine(); output.WriteLine("Detailed bucket info:"); foreach (KeyValuePair<string, List<ProcessInfo>> bucketKvp in orderedBuckets) { output.WriteLine(""); output.WriteLine($@"Bucket name: {bucketKvp.Key}"); output.WriteLine($@"Failing tests ({bucketKvp.Value.Count} total):"); foreach (ProcessInfo failure in bucketKvp.Value) { output.WriteLine($@" {failure.Parameters.OutputFileName}"); } if (detailed) { output.WriteLine(); output.WriteLine($@"Detailed test failures:"); foreach (ProcessInfo failure in bucketKvp.Value) { output.WriteLine($@"Test: {failure.Parameters.OutputFileName}"); try { output.WriteLine(File.ReadAllText(failure.Parameters.LogPath)); } catch (Exception ex) { output.WriteLine($"Error reading file {failure.Parameters.LogPath}: {ex.Message}"); } output.WriteLine(); } } } } private static string AnalyzeCompilationFailure(ProcessInfo process) { try { if (process.TimedOut) { return "Timed out"; } string[] lines = File.ReadAllLines(process.Parameters.LogPath); for (int lineIndex = 2; lineIndex < lines.Length; lineIndex++) { string line = lines[lineIndex]; if (line.Length == 0 || line.StartsWith("EXEC : warning") || line.StartsWith("To repro,") || line.StartsWith("Emitting R2R PE file") || line.StartsWith("Moving R2R PE file") || line.StartsWith("Warning: ") || line.StartsWith("Info: ") || line == "Assertion Failed") { continue; } return line; } return string.Join("; ", lines); } catch (Exception ex) { return ex.Message; } } private static string AnalyzeExecutionFailure(ProcessInfo process) { try { if (process.TimedOut) { return "Timed out"; } string[] lines = File.ReadAllLines(process.Parameters.LogPath); for (int lineIndex = 0; lineIndex < lines.Length; lineIndex++) { string line = lines[lineIndex]; if (line.StartsWith("Assert failure")) { int openParen = line.IndexOf('('); int closeParen 
= line.IndexOf(')', openParen + 1); if (openParen > 0 && closeParen > openParen) { line = line.Substring(0, openParen) + line.Substring(closeParen + 1); } return line; } else if (line.StartsWith("Unhandled exception", StringComparison.OrdinalIgnoreCase)) { int leftBracket = line.IndexOf('['); int rightBracket = line.IndexOf(']', leftBracket + 1); if (leftBracket >= 0 && rightBracket > leftBracket) { line = line.Substring(0, leftBracket) + line.Substring(rightBracket + 1); } for (int detailLineIndex = lineIndex + 1; detailLineIndex < lines.Length; detailLineIndex++) { string detailLine = lines[detailLineIndex].TrimStart(); if (!detailLine.StartsWith("--->")) { break; } line += " " + detailLine; } return line; } else if (line.StartsWith("Fatal error", StringComparison.OrdinalIgnoreCase)) { if (lineIndex + 1 < lines.Length && lines[lineIndex + 1].TrimStart().StartsWith("at ")) { line += lines[lineIndex + 1]; } return line; } } return $"Exit code: {process.ExitCode} = 0x{process.ExitCode:X8}, expected {process.Parameters.ExpectedExitCode}"; } catch (Exception ex) { return ex.Message; } } } }
-1
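For readers scanning the Buckets.cs payload in the record above: its AnalyzeExecutionFailure method builds bucket keys by stripping the parenthesized file/line detail from "Assert failure" lines, so identical asserts from different source locations fall into the same bucket. The following is a minimal, self-contained C# sketch of just that normalization step; the class name and sample log line are illustrative, not part of the R2RTest source.

```c#
using System;

// Minimal sketch of the "Assert failure" normalization performed in
// AnalyzeExecutionFailure: drop the "(file:line)" portion so the bucket
// key is independent of where the assert fired.
public static class BucketKeySketch
{
    public static string Normalize(string line)
    {
        if (line.StartsWith("Assert failure"))
        {
            int openParen = line.IndexOf('(');
            int closeParen = line.IndexOf(')', openParen + 1);
            if (openParen > 0 && closeParen > openParen)
            {
                // Keep the assert text, discard the location detail.
                return line.Substring(0, openParen) + line.Substring(closeParen + 1);
            }
        }
        return line;
    }

    public static void Main()
    {
        string raw = "Assert failure(c:\\src\\jit\\foo.cpp:123): op != nullptr";
        Console.WriteLine(Normalize(raw)); // prints: Assert failure: op != nullptr
    }
}
```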
dotnet/runtime
66,477
Add armv8l virtualized 32-bit ARM core detection
## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse `armv8l` (the output of `uname -m` on such systems) as `arm`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812) Arm support on source-build at https://github.com/dotnet/installer/pull/13378 Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782
ayakael
2022-03-10T21:05:11Z
2022-03-11T09:29:12Z
f82fe1d83f1a0e22025e186fdc6f4d86de0a83a1
12816261feee53cc57fab17f088ee6544ef6ffdb
Add armv8l virtualized 32-bit ARM core detection. ## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse `armv8l` (the output of `uname -m` on such systems) as `arm`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812) Arm support on source-build at https://github.com/dotnet/installer/pull/13378 Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782
./src/tests/Loader/classloader/TypeGeneratorTests/TypeGeneratorTest10/Generated10.ilproj
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <ItemGroup> <Compile Include="Generated10.il" /> </ItemGroup> <ItemGroup> <ProjectReference Include="..\TestFramework\TestFramework.csproj" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <ItemGroup> <Compile Include="Generated10.il" /> </ItemGroup> <ItemGroup> <ProjectReference Include="..\TestFramework\TestFramework.csproj" /> </ItemGroup> </Project>
-1
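The records for PR 66,477 above and below map `armv8l` — what `uname -m` reports for a 32-bit userspace on a 64-bit ARM core — to the same architecture as `armv7l`. The actual change lives in the repository's build scripts rather than in C#; the hypothetical helper below is only a hedged illustration of the intended mapping.

```c#
using System;

// Illustrative only: a hypothetical helper showing the mapping the PR
// describes. armv8l (32-bit userspace on a 64-bit ARM core) is treated
// exactly like armv7l, i.e. as "arm".
public static class UnameArchMapSketch
{
    public static string MapMachine(string unameM) => unameM switch
    {
        "armv7l" or "armv8l" => "arm",   // armv8l handled the same way as armv7l
        "aarch64" => "arm64",
        "x86_64" => "x64",
        _ => unameM
    };

    public static void Main()
    {
        Console.WriteLine(MapMachine("armv8l"));  // arm
        Console.WriteLine(MapMachine("aarch64")); // arm64
    }
}
```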
dotnet/runtime
66,477
Add armv8l virtualized 32-bit ARM core detection
## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse `armv8l` (the output of `uname -m` on such systems) as `arm`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812) Arm support on source-build at https://github.com/dotnet/installer/pull/13378 Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782
ayakael
2022-03-10T21:05:11Z
2022-03-11T09:29:12Z
f82fe1d83f1a0e22025e186fdc6f4d86de0a83a1
12816261feee53cc57fab17f088ee6544ef6ffdb
Add armv8l virtualized 32-bit ARM core detection. ## Expected behavior The runtime should be able to build within a 32-bit userspace on 64-bit ARM cores. ## Actual behavior Per dotnet/source-build#2781, the runtime does not know to parse `armv8l` (the output of `uname -m` on such systems) as `arm`. ## Proposed modifications This pull request modifies the existing logic to parse `armv8l` in the same way as `armv7l`. ## Varia Parallel merge request on Alpine's [side](https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/31812) Arm support on source-build at https://github.com/dotnet/installer/pull/13378 Made as part of the Alpine Linux dotnet6 packaging project; see dotnet/source-build#2782
./src/tests/JIT/Regression/CLR-x86-JIT/V1-M09.5-PDC/b18852/b18852.il
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern legacy library mscorlib {} .assembly extern System.Console { .publickeytoken = (B0 3F 5F 7F 11 D5 0A 3A ) .ver 4:0:0:0 } .assembly ILGEN_0x38e9694e {} .class ILGEN_0x38e9694e { .method static int32 main() { .entrypoint .maxstack 18 .locals (unsigned int8 local_0xa,float32 local_0x10, int32 ecode) ldc.i4.1 stloc ecode .try { ldc.i4 0x49881c6f stloc local_0xa ldc.r4 float32(0x26b1a70) stloc local_0x10 ldc.i4.3 ldc.i4.8 neg mul.ovf conv.ovf.u2.un ldc.i8 44 conv.i ldloc local_0xa not ldc.i4 3 div add div conv.r4 stloc local_0x10 leave xx } catch [mscorlib]System.OverflowException { pop ldc.i4.0 stloc ecode leave xx } xx: ldloc ecode ldc.i4 100 add ret } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern legacy library mscorlib {} .assembly extern System.Console { .publickeytoken = (B0 3F 5F 7F 11 D5 0A 3A ) .ver 4:0:0:0 } .assembly ILGEN_0x38e9694e {} .class ILGEN_0x38e9694e { .method static int32 main() { .entrypoint .maxstack 18 .locals (unsigned int8 local_0xa,float32 local_0x10, int32 ecode) ldc.i4.1 stloc ecode .try { ldc.i4 0x49881c6f stloc local_0xa ldc.r4 float32(0x26b1a70) stloc local_0x10 ldc.i4.3 ldc.i4.8 neg mul.ovf conv.ovf.u2.un ldc.i8 44 conv.i ldloc local_0xa not ldc.i4 3 div add div conv.r4 stloc local_0x10 leave xx } catch [mscorlib]System.OverflowException { pop ldc.i4.0 stloc ecode leave xx } xx: ldloc ecode ldc.i4 100 add ret } }
-1
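The IL payload in the record above (b18852.il) drives a `conv.ovf.u2.un` on a negative intermediate value, expects an `OverflowException`, and reports success via exit code 100. Below is a rough, hedged C# analogue of that control flow — an illustrative sketch, not the regression test itself.

```c#
using System;

// Rough C# analogue of the IL regression test: a checked narrowing
// conversion of a "large" unsigned value is expected to overflow, and
// the process exits with 100 only when the exception is observed.
public static class B18852Sketch
{
    public static int Main()
    {
        int ecode = 1;
        try
        {
            int value = 3 * -8;                            // -24, mirrors the mul of 3 and -8
            uint asUnsigned = unchecked((uint)value);      // reinterpreted as 0xFFFFFFE8
            ushort narrowed = checked((ushort)asUnsigned); // out of ushort range -> throws
            Console.WriteLine(narrowed);                   // never reached
        }
        catch (OverflowException)
        {
            ecode = 0; // expected path
        }
        return 100 + ecode; // 100 == pass
    }
}
```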
dotnet/runtime
66,474
Update repo CMake configuration to target /W4 by default
Contributes to https://github.com/dotnet/runtime/issues/66154. Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
AaronRobinsonMSFT
2022-03-10T20:20:31Z
2022-03-11T04:59:21Z
ba5207b0f4054fe8963fc3e87c2bd32907516961
5ce2b9f860f7a84c3059650bb67817d59d8f4953
Update repo CMake configuration to target /W4 by default. Contributes to https://github.com/dotnet/runtime/issues/66154. Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
./eng/native/configurecompiler.cmake
include(${CMAKE_CURRENT_LIST_DIR}/configuretools.cmake) # Set initial flags for each configuration set(CMAKE_EXPORT_COMPILE_COMMANDS ON) set(CMAKE_C_STANDARD 11) set(CMAKE_C_STANDARD_REQUIRED ON) set(CMAKE_CXX_STANDARD 11) set(CMAKE_CXX_STANDARD_REQUIRED ON) include(CheckCCompilerFlag) include(CheckCXXCompilerFlag) # "configureoptimization.cmake" must be included after CLR_CMAKE_HOST_UNIX has been set. include(${CMAKE_CURRENT_LIST_DIR}/configureoptimization.cmake) #----------------------------------------------------- # Initialize Cmake compiler flags and other variables #----------------------------------------------------- if (CLR_CMAKE_HOST_UNIX) add_compile_options(-g) add_compile_options(-Wall) if (CMAKE_CXX_COMPILER_ID MATCHES "Clang") add_compile_options(-Wno-null-conversion) else() add_compile_options($<$<COMPILE_LANGUAGE:CXX>:-Werror=conversion-null>) endif() endif() if (CMAKE_CONFIGURATION_TYPES) # multi-configuration generator? set(CMAKE_CONFIGURATION_TYPES "Debug;Checked;Release;RelWithDebInfo" CACHE STRING "" FORCE) endif (CMAKE_CONFIGURATION_TYPES) set(CMAKE_C_FLAGS_CHECKED "") set(CMAKE_CXX_FLAGS_CHECKED "") set(CMAKE_EXE_LINKER_FLAGS_CHECKED "") set(CMAKE_SHARED_LINKER_FLAGS_CHECKED "") set(CMAKE_SHARED_LINKER_FLAGS_DEBUG "") set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "") set(CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO "") set(CMAKE_EXE_LINKER_FLAGS_DEBUG "") set(CMAKE_EXE_LINKER_FLAGS_DEBUG "") set(CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "") add_compile_definitions("$<$<CONFIG:DEBUG>:DEBUG;_DEBUG;_DBG;URTBLDENV_FRIENDLY=Debug;BUILDENV_DEBUG=1>") add_compile_definitions("$<$<CONFIG:CHECKED>:DEBUG;_DEBUG;_DBG;URTBLDENV_FRIENDLY=Checked;BUILDENV_CHECKED=1>") add_compile_definitions("$<$<OR:$<CONFIG:RELEASE>,$<CONFIG:RELWITHDEBINFO>>:NDEBUG;URTBLDENV_FRIENDLY=Retail>") if (MSVC) add_linker_flag(/guard:cf) # Linker flags # set (WINDOWS_SUBSYSTEM_VERSION 6.01) if (CLR_CMAKE_HOST_ARCH_ARM) set(WINDOWS_SUBSYSTEM_VERSION 6.02) #windows subsystem - arm minimum is 6.02 elseif(CLR_CMAKE_HOST_ARCH_ARM64) set(WINDOWS_SUBSYSTEM_VERSION 6.03) #windows subsystem - arm64 minimum is 6.03 endif () #Do not create Side-by-Side Assembly Manifest set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /MANIFEST:NO") # can handle addresses larger than 2 gigabytes set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /LARGEADDRESSAWARE") #shrink pdb size set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /PDBCOMPRESS") set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /DEBUG") set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /IGNORE:4197,4013,4254,4070,4221") set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /SUBSYSTEM:WINDOWS,${WINDOWS_SUBSYSTEM_VERSION}") set(CMAKE_STATIC_LINKER_FLAGS "${CMAKE_STATIC_LINKER_FLAGS} /IGNORE:4221") set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /DEBUG") set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /PDBCOMPRESS") set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /STACK:1572864") # Checked build specific flags add_linker_flag(/INCREMENTAL:NO CHECKED) # prevent "warning LNK4075: ignoring '/INCREMENTAL' due to '/OPT:REF' specification" add_linker_flag(/OPT:REF CHECKED) add_linker_flag(/OPT:NOICF CHECKED) # Release build specific flags add_linker_flag(/LTCG RELEASE) add_linker_flag(/OPT:REF RELEASE) add_linker_flag(/OPT:ICF RELEASE) add_linker_flag(/INCREMENTAL:NO RELEASE) set(CMAKE_STATIC_LINKER_FLAGS_RELEASE "${CMAKE_STATIC_LINKER_FLAGS_RELEASE} /LTCG") # ReleaseWithDebugInfo build specific flags 
add_linker_flag(/LTCG RELWITHDEBINFO) add_linker_flag(/OPT:REF RELWITHDEBINFO) add_linker_flag(/OPT:ICF RELWITHDEBINFO) set(CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO} /LTCG") # Force uCRT to be dynamically linked for Release build add_linker_flag(/NODEFAULTLIB:libucrt.lib RELEASE) add_linker_flag(/DEFAULTLIB:ucrt.lib RELEASE) elseif (CLR_CMAKE_HOST_UNIX) # Set the values to display when interactively configuring CMAKE_BUILD_TYPE set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "DEBUG;CHECKED;RELEASE;RELWITHDEBINFO") # Use uppercase CMAKE_BUILD_TYPE for the string comparisons below string(TOUPPER ${CMAKE_BUILD_TYPE} UPPERCASE_CMAKE_BUILD_TYPE) set(CLR_SANITIZE_CXX_OPTIONS "") set(CLR_SANITIZE_LINK_OPTIONS "") # set the CLANG sanitizer flags for debug build if(UPPERCASE_CMAKE_BUILD_TYPE STREQUAL DEBUG OR UPPERCASE_CMAKE_BUILD_TYPE STREQUAL CHECKED) # obtain settings from running enablesanitizers.sh string(FIND "$ENV{DEBUG_SANITIZERS}" "asan" __ASAN_POS) string(FIND "$ENV{DEBUG_SANITIZERS}" "ubsan" __UBSAN_POS) if ((${__ASAN_POS} GREATER -1) OR (${__UBSAN_POS} GREATER -1)) list(APPEND CLR_SANITIZE_CXX_OPTIONS -fsanitize-blacklist=${CMAKE_CURRENT_SOURCE_DIR}/sanitizerblacklist.txt) set (CLR_CXX_SANITIZERS "") set (CLR_LINK_SANITIZERS "") if (${__ASAN_POS} GREATER -1) list(APPEND CLR_CXX_SANITIZERS address) list(APPEND CLR_LINK_SANITIZERS address) set(CLR_SANITIZE_CXX_FLAGS "${CLR_SANITIZE_CXX_FLAGS}address,") set(CLR_SANITIZE_LINK_FLAGS "${CLR_SANITIZE_LINK_FLAGS}address,") add_definitions(-DHAS_ASAN) message("Address Sanitizer (asan) enabled") endif () if (${__UBSAN_POS} GREATER -1) # all sanitizier flags are enabled except alignment (due to heavy use of __unaligned modifier) list(APPEND CLR_CXX_SANITIZERS "bool" bounds enum float-cast-overflow float-divide-by-zero "function" integer nonnull-attribute null object-size "return" returns-nonnull-attribute shift unreachable vla-bound vptr) list(APPEND CLR_LINK_SANITIZERS undefined) message("Undefined Behavior Sanitizer (ubsan) enabled") endif () list(JOIN CLR_CXX_SANITIZERS "," CLR_CXX_SANITIZERS_OPTIONS) list(APPEND CLR_SANITIZE_CXX_OPTIONS "-fsanitize=${CLR_CXX_SANITIZERS_OPTIONS}") list(JOIN CLR_LINK_SANITIZERS "," CLR_LINK_SANITIZERS_OPTIONS) list(APPEND CLR_SANITIZE_LINK_OPTIONS "-fsanitize=${CLR_LINK_SANITIZERS_OPTIONS}") # -fdata-sections -ffunction-sections: each function has own section instead of one per .o file (needed for --gc-sections) # -O1: optimization level used instead of -O0 to avoid compile error "invalid operand for inline asm constraint" add_compile_options("$<$<OR:$<CONFIG:DEBUG>,$<CONFIG:CHECKED>>:${CLR_SANITIZE_CXX_OPTIONS};-fdata-sections;--ffunction-sections;-O1>") add_linker_flag("${CLR_SANITIZE_LINK_OPTIONS}" DEBUG CHECKED) # -Wl and --gc-sections: drop unused sections\functions (similar to Windows /Gy function-level-linking) add_linker_flag("-Wl,--gc-sections" DEBUG CHECKED) endif () endif(UPPERCASE_CMAKE_BUILD_TYPE STREQUAL DEBUG OR UPPERCASE_CMAKE_BUILD_TYPE STREQUAL CHECKED) if(CLR_CMAKE_HOST_BROWSER) # The emscripten build has additional warnings so -Werror breaks add_compile_options(-Wno-unused-parameter) add_compile_options(-Wno-alloca) add_compile_options(-Wno-implicit-int-float-conversion) endif() endif(MSVC) # CLR_ADDITIONAL_LINKER_FLAGS - used for passing additional arguments to linker # CLR_ADDITIONAL_COMPILER_OPTIONS - used for passing additional arguments to compiler # # For example: # ./build-native.sh cmakeargs "-DCLR_ADDITIONAL_COMPILER_OPTIONS=<...>" 
cmakeargs "-DCLR_ADDITIONAL_LINKER_FLAGS=<...>" # if(CLR_CMAKE_HOST_UNIX) foreach(ADDTL_LINKER_FLAG ${CLR_ADDITIONAL_LINKER_FLAGS}) add_linker_flag(${ADDTL_LINKER_FLAG}) endforeach() endif(CLR_CMAKE_HOST_UNIX) if(CLR_CMAKE_HOST_LINUX) add_compile_options($<$<COMPILE_LANGUAGE:ASM>:-Wa,--noexecstack>) add_linker_flag(-Wl,--build-id=sha1) add_linker_flag(-Wl,-z,relro,-z,now) elseif(CLR_CMAKE_HOST_FREEBSD) add_compile_options($<$<COMPILE_LANGUAGE:ASM>:-Wa,--noexecstack>) add_linker_flag("-Wl,--build-id=sha1") elseif(CLR_CMAKE_HOST_SUNOS) add_compile_options($<$<COMPILE_LANGUAGE:ASM>:-Wa,--noexecstack>) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fstack-protector") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fstack-protector") add_definitions(-D__EXTENSIONS__ -D_XPG4_2 -D_POSIX_PTHREAD_SEMANTICS) elseif(CLR_CMAKE_HOST_OSX AND NOT CLR_CMAKE_HOST_IOS AND NOT CLR_CMAKE_HOST_TVOS) add_definitions(-D_XOPEN_SOURCE) add_linker_flag("-Wl,-bind_at_load") endif() #------------------------------------ # Definitions (for platform) #----------------------------------- if (CLR_CMAKE_HOST_ARCH_AMD64) set(ARCH_HOST_NAME x64) add_definitions(-DHOST_AMD64 -DHOST_64BIT) elseif (CLR_CMAKE_HOST_ARCH_I386) set(ARCH_HOST_NAME x86) add_definitions(-DHOST_X86) elseif (CLR_CMAKE_HOST_ARCH_ARM) set(ARCH_HOST_NAME arm) add_definitions(-DHOST_ARM) elseif (CLR_CMAKE_HOST_ARCH_ARMV6) set(ARCH_HOST_NAME armv6) add_definitions(-DHOST_ARMV6) elseif (CLR_CMAKE_HOST_ARCH_ARM64) set(ARCH_HOST_NAME arm64) add_definitions(-DHOST_ARM64 -DHOST_64BIT) elseif (CLR_CMAKE_HOST_ARCH_LOONGARCH64) set(ARCH_HOST_NAME loongarch64) add_definitions(-DHOST_LOONGARCH64 -DHOST_64BIT) elseif (CLR_CMAKE_HOST_ARCH_S390X) set(ARCH_HOST_NAME s390x) add_definitions(-DHOST_S390X -DHOST_64BIT -DBIGENDIAN) elseif (CLR_CMAKE_HOST_ARCH_WASM) set(ARCH_HOST_NAME wasm) add_definitions(-DHOST_WASM -DHOST_32BIT=1) elseif (CLR_CMAKE_HOST_ARCH_MIPS64) set(ARCH_HOST_NAME mips64) add_definitions(-DHOST_MIPS64 -DHOST_64BIT=1) else () clr_unknown_arch() endif () if (CLR_CMAKE_HOST_UNIX) if(CLR_CMAKE_HOST_LINUX) if(CLR_CMAKE_HOST_UNIX_AMD64) message("Detected Linux x86_64") elseif(CLR_CMAKE_HOST_UNIX_ARM) message("Detected Linux ARM") elseif(CLR_CMAKE_HOST_UNIX_ARMV6) message("Detected Linux ARMv6") elseif(CLR_CMAKE_HOST_UNIX_ARM64) message("Detected Linux ARM64") elseif(CLR_CMAKE_HOST_UNIX_LOONGARCH64) message("Detected Linux LOONGARCH64") elseif(CLR_CMAKE_HOST_UNIX_X86) message("Detected Linux i686") elseif(CLR_CMAKE_HOST_UNIX_S390X) message("Detected Linux s390x") else() clr_unknown_arch() endif() endif(CLR_CMAKE_HOST_LINUX) endif(CLR_CMAKE_HOST_UNIX) if (CLR_CMAKE_HOST_UNIX) add_definitions(-DHOST_UNIX) if(CLR_CMAKE_HOST_OSX OR CLR_CMAKE_HOST_MACCATALYST) add_definitions(-DHOST_OSX) if(CLR_CMAKE_HOST_UNIX_AMD64) message("Detected OSX x86_64") elseif(CLR_CMAKE_HOST_UNIX_ARM64) message("Detected OSX ARM64") else() clr_unknown_arch() endif() elseif(CLR_CMAKE_HOST_FREEBSD) message("Detected FreeBSD amd64") elseif(CLR_CMAKE_HOST_NETBSD) message("Detected NetBSD amd64") elseif(CLR_CMAKE_HOST_SUNOS) message("Detected SunOS amd64") endif(CLR_CMAKE_HOST_OSX OR CLR_CMAKE_HOST_MACCATALYST) endif(CLR_CMAKE_HOST_UNIX) if (CLR_CMAKE_HOST_WIN32) add_definitions(-DHOST_WINDOWS) # Define the CRT lib references that link into Desktop imports set(STATIC_MT_CRT_LIB "libcmt$<$<OR:$<CONFIG:Debug>,$<CONFIG:Checked>>:d>.lib") set(STATIC_MT_VCRT_LIB "libvcruntime$<$<OR:$<CONFIG:Debug>,$<CONFIG:Checked>>:d>.lib") set(STATIC_MT_CPP_LIB "libcpmt$<$<OR:$<CONFIG:Debug>,$<CONFIG:Checked>>:d>.lib") 
endif(CLR_CMAKE_HOST_WIN32) # Unconditionally define _FILE_OFFSET_BITS as 64 on all platforms. add_definitions(-D_FILE_OFFSET_BITS=64) # Architecture specific files folder name if (CLR_CMAKE_TARGET_ARCH_AMD64) set(ARCH_SOURCES_DIR amd64) set(ARCH_TARGET_NAME x64) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_ARCH>>>:TARGET_AMD64>) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_ARCH>>>:TARGET_64BIT>) elseif (CLR_CMAKE_TARGET_ARCH_ARM64) set(ARCH_SOURCES_DIR arm64) set(ARCH_TARGET_NAME arm64) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_ARCH>>>:TARGET_ARM64>) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_ARCH>>>:TARGET_64BIT>) elseif (CLR_CMAKE_TARGET_ARCH_ARM) set(ARCH_SOURCES_DIR arm) set(ARCH_TARGET_NAME arm) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_ARCH>>>:TARGET_ARM>) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_ARCH>>>:TARGET_32BIT>) elseif (CLR_CMAKE_TARGET_ARCH_ARMV6) set(ARCH_SOURCES_DIR arm) set(ARCH_TARGET_NAME armv6) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_ARCH>>>:TARGET_ARM>) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_ARCH>>>:TARGET_ARMV6>) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_ARCH>>>:TARGET_32BIT>) elseif (CLR_CMAKE_TARGET_ARCH_I386) set(ARCH_TARGET_NAME x86) set(ARCH_SOURCES_DIR i386) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_ARCH>>>:TARGET_X86>) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_ARCH>>>:TARGET_32BIT>) elseif (CLR_CMAKE_TARGET_ARCH_LOONGARCH64) set(ARCH_TARGET_NAME loongarch64) set(ARCH_SOURCES_DIR loongarch64) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_ARCH>>>:TARGET_LOONGARCH64>) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_ARCH>>>:TARGET_64BIT>) elseif (CLR_CMAKE_TARGET_ARCH_S390X) set(ARCH_TARGET_NAME s390x) set(ARCH_SOURCES_DIR s390x) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_ARCH>>>:TARGET_S390X>) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_ARCH>>>:TARGET_64BIT>) elseif (CLR_CMAKE_TARGET_ARCH_WASM) set(ARCH_TARGET_NAME wasm) set(ARCH_SOURCES_DIR wasm) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_ARCH>>>:TARGET_WASM>) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_ARCH>>>:TARGET_32BIT>) elseif (CLR_CMAKE_TARGET_ARCH_MIPS64) set(ARCH_TARGET_NAME mips64) set(ARCH_SOURCES_DIR mips64) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_ARCH>>>:TARGET_MIPS64>) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_ARCH>>>:TARGET_64BIT>) else () clr_unknown_arch() endif () #-------------------------------------- # Compile Options #-------------------------------------- if (CLR_CMAKE_HOST_UNIX) # Disable frame pointer optimizations so profilers can get better call stacks add_compile_options(-fno-omit-frame-pointer) # The -fms-extensions enable the stuff like __if_exists, __declspec(uuid()), etc. 
add_compile_options(-fms-extensions) #-fms-compatibility Enable full Microsoft Visual C++ compatibility #-fms-extensions Accept some non-standard constructs supported by the Microsoft compiler # Make signed arithmetic overflow of addition, subtraction, and multiplication wrap around # using twos-complement representation (this is normally undefined according to the C++ spec). add_compile_options(-fwrapv) if(CLR_CMAKE_HOST_OSX OR CLR_CMAKE_HOST_MACCATALYST) # We cannot enable "stack-protector-strong" on OS X due to a bug in clang compiler (current version 7.0.2) add_compile_options(-fstack-protector) elseif(NOT CLR_CMAKE_HOST_BROWSER) check_c_compiler_flag(-fstack-protector-strong COMPILER_SUPPORTS_F_STACK_PROTECTOR_STRONG) if (COMPILER_SUPPORTS_F_STACK_PROTECTOR_STRONG) add_compile_options(-fstack-protector-strong) endif() endif(CLR_CMAKE_HOST_OSX OR CLR_CMAKE_HOST_MACCATALYST) # Suppress warnings-as-errors in release branches to reduce servicing churn if (PRERELEASE) add_compile_options(-Werror) endif(PRERELEASE) # Disabled common warnings add_compile_options(-Wno-unused-variable) add_compile_options(-Wno-unused-value) add_compile_options(-Wno-unused-function) add_compile_options(-Wno-tautological-compare) add_compile_options(-Wno-unknown-pragmas) # Explicitly enabled warnings check_c_compiler_flag(-Wimplicit-fallthrough COMPILER_SUPPORTS_W_IMPLICIT_FALLTHROUGH) if (COMPILER_SUPPORTS_W_IMPLICIT_FALLTHROUGH) add_compile_options(-Wimplicit-fallthrough) endif() #These seem to indicate real issues add_compile_options($<$<COMPILE_LANGUAGE:CXX>:-Wno-invalid-offsetof>) add_compile_options(-Wno-unused-but-set-variable) # Turn off floating point expression contraction because it is considered a value changing # optimization in the IEEE 754 specification and is therefore considered unsafe. add_compile_options(-ffp-contract=off) if (CMAKE_C_COMPILER_ID MATCHES "Clang") add_compile_options(-Wno-unknown-warning-option) # The -ferror-limit is helpful during the porting, it makes sure the compiler doesn't stop # after hitting just about 20 errors. add_compile_options(-ferror-limit=4096) # Disabled warnings add_compile_options(-Wno-unused-private-field) # There are constants of type BOOL used in a condition. But BOOL is defined as int # and so the compiler thinks that there is a mistake. add_compile_options(-Wno-constant-logical-operand) # We use pshpack1/2/4/8.h and poppack.h headers to set and restore packing. However # clang 6.0 complains when the packing change lifetime is not contained within # a header file. add_compile_options(-Wno-pragma-pack) # The following warning indicates that an attribute __attribute__((__ms_struct__)) was applied # to a struct or a class that has virtual members or a base class. In that case, clang # may not generate the same object layout as MSVC. 
add_compile_options(-Wno-incompatible-ms-struct) add_compile_options(-Wno-reserved-identifier) else() add_compile_options(-Wno-uninitialized) add_compile_options(-Wno-strict-aliasing) add_compile_options(-Wno-array-bounds) add_compile_options($<$<COMPILE_LANGUAGE:CXX>:-Wno-class-memaccess>) add_compile_options($<$<COMPILE_LANGUAGE:CXX>:-Wno-misleading-indentation>) add_compile_options($<$<COMPILE_LANGUAGE:CXX>:-Wno-stringop-overflow>) add_compile_options($<$<COMPILE_LANGUAGE:CXX>:-Wno-stringop-truncation>) add_compile_options($<$<COMPILE_LANGUAGE:CXX>:-Wno-placement-new>) if (CMAKE_CXX_COMPILER_ID) check_cxx_compiler_flag(-faligned-new COMPILER_SUPPORTS_F_ALIGNED_NEW) if (COMPILER_SUPPORTS_F_ALIGNED_NEW) add_compile_options($<$<COMPILE_LANGUAGE:CXX>:-faligned-new>) endif() endif() endif() # Some architectures (e.g., ARM) assume char type is unsigned while CoreCLR assumes char is signed # as x64 does. It has been causing issues in ARM (https://github.com/dotnet/runtime/issues/5778) add_compile_options(-fsigned-char) # We mark the function which needs exporting with DLLEXPORT add_compile_options(-fvisibility=hidden) # Specify the minimum supported version of macOS # Mac Catalyst needs a special CFLAG, exclusive with mmacosx-version-min if(CLR_CMAKE_HOST_MACCATALYST) # Somewhere between CMake 3.17 and 3.19.4, it became impossible to not pass # a value for mmacosx-version-min (blank CMAKE_OSX_DEPLOYMENT_TARGET gets # replaced with a default value, and always gets expanded to an OS version. # https://gitlab.kitware.com/cmake/cmake/-/issues/20132 # We need to disable the warning that -tagret replaces -mmacosx-version-min set(DISABLE_OVERRIDING_MIN_VERSION_ERROR -Wno-overriding-t-option) add_link_options(-Wno-overriding-t-option) if(CLR_CMAKE_HOST_ARCH_ARM64) set(MACOS_VERSION_MIN_FLAGS "-target arm64-apple-ios14.2-macabi") add_link_options(-target arm64-apple-ios14.2-macabi) elseif(CLR_CMAKE_HOST_ARCH_AMD64) set(MACOS_VERSION_MIN_FLAGS "-target x86_64-apple-ios13.5-macabi") add_link_options(-target x86_64-apple-ios13.5-macabi) else() clr_unknown_arch() endif() # These options are intentionally set using the CMAKE_XXX_FLAGS instead of # add_compile_options so that they take effect on the configuration functions # in various configure.cmake files. set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${MACOS_VERSION_MIN_FLAGS} ${DISABLE_OVERRIDING_MIN_VERSION_ERROR}") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${MACOS_VERSION_MIN_FLAGS} ${DISABLE_OVERRIDING_MIN_VERSION_ERROR}") set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} ${MACOS_VERSION_MIN_FLAGS} ${DISABLE_OVERRIDING_MIN_VERSION_ERROR}") elseif(CLR_CMAKE_HOST_OSX) if(CLR_CMAKE_HOST_ARCH_ARM64) set(CMAKE_OSX_DEPLOYMENT_TARGET "11.0") add_compile_options(-arch arm64) elseif(CLR_CMAKE_HOST_ARCH_AMD64) set(CMAKE_OSX_DEPLOYMENT_TARGET "10.14") add_compile_options(-arch x86_64) else() clr_unknown_arch() endif() endif(CLR_CMAKE_HOST_MACCATALYST) endif(CLR_CMAKE_HOST_UNIX) if(CLR_CMAKE_TARGET_UNIX) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_OS>>>:TARGET_UNIX>) # Contracts are disabled on UNIX. 
add_definitions(-DDISABLE_CONTRACTS) if(CLR_CMAKE_TARGET_OSX AND NOT CLR_CMAKE_TARGET_IOS AND NOT CLR_CMAKE_TARGET_TVOS) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_OS>>>:TARGET_OSX>) elseif(CLR_CMAKE_TARGET_FREEBSD) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_OS>>>:TARGET_FREEBSD>) elseif(CLR_CMAKE_TARGET_ANDROID) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_OS>>>:TARGET_ANDROID>) elseif(CLR_CMAKE_TARGET_LINUX) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_OS>>>:TARGET_LINUX>) if(CLR_CMAKE_TARGET_LINUX_MUSL) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_OS>>>:TARGET_LINUX_MUSL>) endif() elseif(CLR_CMAKE_TARGET_NETBSD) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_OS>>>:TARGET_NETBSD>) elseif(CLR_CMAKE_TARGET_SUNOS) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_OS>>>:TARGET_SUNOS>) if(CLR_CMAKE_TARGET_OS_ILLUMOS) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_OS>>>:TARGET_ILLUMOS>) endif() endif() else(CLR_CMAKE_TARGET_UNIX) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_OS>>>:TARGET_WINDOWS>) endif(CLR_CMAKE_TARGET_UNIX) if(CLR_CMAKE_HOST_UNIX_ARM) if (NOT DEFINED CLR_ARM_FPU_TYPE) set(CLR_ARM_FPU_TYPE vfpv3) endif(NOT DEFINED CLR_ARM_FPU_TYPE) # Because we don't use CMAKE_C_COMPILER/CMAKE_CXX_COMPILER to use clang # we have to set the triple by adding a compiler argument add_compile_options(-mthumb) add_compile_options(-mfpu=${CLR_ARM_FPU_TYPE}) if (NOT DEFINED CLR_ARM_FPU_CAPABILITY) set(CLR_ARM_FPU_CAPABILITY 0x7) endif(NOT DEFINED CLR_ARM_FPU_CAPABILITY) add_definitions(-DCLR_ARM_FPU_CAPABILITY=${CLR_ARM_FPU_CAPABILITY}) add_compile_options(-march=armv7-a) if(ARM_SOFTFP) add_definitions(-DARM_SOFTFP) add_compile_options(-mfloat-abi=softfp) endif(ARM_SOFTFP) endif(CLR_CMAKE_HOST_UNIX_ARM) if(CLR_CMAKE_HOST_UNIX_ARMV6) add_compile_options(-mfpu=vfp) add_definitions(-DCLR_ARM_FPU_CAPABILITY=0x0) add_compile_options(-march=armv6zk) add_compile_options(-mcpu=arm1176jzf-s) add_compile_options(-mfloat-abi=hard) endif(CLR_CMAKE_HOST_UNIX_ARMV6) if(CLR_CMAKE_HOST_UNIX_X86) add_compile_options(-msse2) endif() if(CLR_CMAKE_HOST_UNIX) add_compile_options(${CLR_ADDITIONAL_COMPILER_OPTIONS}) endif(CLR_CMAKE_HOST_UNIX) if (MSVC) # Compile options for targeting windows add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/nologo>) # Suppress Startup Banner # /W3 is added by default by CMake, so remove it string(REPLACE "/W3" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") string(REPLACE "/W3" "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}") # set default warning level to 3 but allow targets to override it. 
add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/W$<GENEX_EVAL:$<IF:$<BOOL:$<TARGET_PROPERTY:MSVC_WARNING_LEVEL>>,$<TARGET_PROPERTY:MSVC_WARNING_LEVEL>,3>>>) add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/WX>) # treat warnings as errors add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/Oi>) # enable intrinsics add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/Oy->) # disable suppressing of the creation of frame pointers on the call stack for quicker function calls add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/Gm->) # disable minimal rebuild add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/Zp8>) # pack structs on 8-byte boundary add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/Gy>) # separate functions for linker add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/GS>) # Explicitly enable the buffer security checks add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/fp:precise>) # Enable precise floating point # disable C++ RTTI # /GR is added by default by CMake, so remove it manually. string(REPLACE "/GR " " " CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /GR-") add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/FC>) # use full pathnames in diagnostics add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/MP>) # Build with Multiple Processes (number of processes equal to the number of processors) add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/Zm200>) # Specify Precompiled Header Memory Allocation Limit of 150MB add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/Zc:strictStrings>) # Disable string-literal to char* or wchar_t* conversion add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/Zc:wchar_t>) # wchar_t is a built-in type. add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/Zc:inline>) # All inline functions must have their definition available in the current translation unit. add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/Zc:forScope>) # Enforce standards-compliant for scope. 
# Disable Warnings: add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4065>) # switch statement contains 'default' but no 'case' labels add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4100>) # 'identifier' : unreferenced formal parameter add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4127>) # conditional expression is constant add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4189>) # local variable is initialized but not referenced add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4200>) # nonstandard extension used : zero-sized array in struct/union add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4201>) # nonstandard extension used : nameless struct/union add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4245>) # conversion from 'type1' to 'type2', signed/unsigned mismatch add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4291>) # no matching operator delete found; memory will not be freed if initialization throws an exception add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4456>) # declaration of 'identifier' hides previous local declaration add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4457>) # declaration of 'identifier' hides function parameter add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4458>) # declaration of 'identifier' hides class member add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4733>) # Inline asm assigning to 'FS:0' : handler not registered as safe handler add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4838>) # conversion from 'type_1' to 'type_2' requires a narrowing conversion add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4960>) # 'function' is too big to be profiled add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4961>) # No profile data was merged into '.pgd file', profile-guided optimizations disabled add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd5105>) # macro expansion producing 'defined' has undefined behavior # Treat Warnings as Errors: add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/we4007>) # 'main' : must be __cdecl. add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/we4013>) # 'function' undefined - assuming extern returning int. add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/we4102>) # "'%$S' : unreferenced label". add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/we4551>) # Function call missing argument list. add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/we4700>) # Local used w/o being initialized. add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/we4640>) # 'instance' : construction of local static object is not thread-safe add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/we4806>) # Unsafe operation involving type 'bool'. # Set Warning Level 3: add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/w34092>) # Sizeof returns 'unsigned long'. add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/w34121>) # Structure is sensitive to alignment. add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/w34125>) # Decimal digit in octal sequence. add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/w34130>) # Logical operation on address of string constant. add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/w34132>) # Const object should be initialized. add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/w34212>) # Function declaration used ellipsis. add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/w34530>) # C++ exception handler used, but unwind semantics are not enabled. Specify -GX. add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/w35038>) # data member 'member1' will be initialized after data member 'member2'. 
# Set Warning Level 4: add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/w44177>) # Pragma data_seg s/b at global scope. add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/Zi>) # enable debugging information add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/ZH:SHA_256>) # use SHA256 for generating hashes of compiler processed source files. add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/source-charset:utf-8>) # Force MSVC to compile source as UTF-8. if (CLR_CMAKE_HOST_ARCH_I386) add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/Gz>) endif (CLR_CMAKE_HOST_ARCH_I386) add_compile_options($<$<AND:$<COMPILE_LANGUAGE:C,CXX>,$<OR:$<CONFIG:Release>,$<CONFIG:Relwithdebinfo>>>:/GL>) if (CLR_CMAKE_HOST_ARCH_AMD64) # The generator expression in the following command means that the /homeparams option is added only for debug builds for C and C++ source files add_compile_options($<$<AND:$<CONFIG:Debug>,$<COMPILE_LANGUAGE:C,CXX>>:/homeparams>) # Force parameters passed in registers to be written to the stack endif (CLR_CMAKE_HOST_ARCH_AMD64) # enable control-flow-guard support for native components for non-Arm64 builds # Added using variables instead of add_compile_options to let individual projects override it set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /guard:cf") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /guard:cf") # Enable EH-continuation table and CET-compatibility for native components for amd64 builds except for components of the Mono # runtime. Added some switches using variables instead of add_compile_options to let individual projects override it. if (CLR_CMAKE_HOST_ARCH_AMD64 AND NOT CLR_CMAKE_RUNTIME_MONO) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /guard:ehcont") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /guard:ehcont") set(CMAKE_ASM_MASM_FLAGS "${CMAKE_ASM_MASM_FLAGS} /guard:ehcont") add_linker_flag(/guard:ehcont) set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /CETCOMPAT") endif (CLR_CMAKE_HOST_ARCH_AMD64 AND NOT CLR_CMAKE_RUNTIME_MONO) # Statically linked CRT (libcmt[d].lib, libvcruntime[d].lib and libucrt[d].lib) by default. This is done to avoid # linking in VCRUNTIME140.DLL for a simplified xcopy experience by reducing the dependency on VC REDIST. # # For Release builds, we shall dynamically link into uCRT [ucrtbase.dll] (which is pushed down as a Windows Update on downlevel OS) but # wont do the same for debug/checked builds since ucrtbased.dll is not redistributable and Debug/Checked builds are not # production-time scenarios. set(CMAKE_MSVC_RUNTIME_LIBRARY MultiThreaded$<$<AND:$<OR:$<CONFIG:Debug>,$<CONFIG:Checked>>,$<NOT:$<BOOL:$<TARGET_PROPERTY:DAC_COMPONENT>>>>:Debug>) add_compile_options($<$<COMPILE_LANGUAGE:ASM_MASM>:/ZH:SHA_256>) if (CLR_CMAKE_TARGET_ARCH_ARM OR CLR_CMAKE_TARGET_ARCH_ARM64) # Contracts work too slow on ARM/ARM64 DEBUG/CHECKED. add_definitions(-DDISABLE_CONTRACTS) endif (CLR_CMAKE_TARGET_ARCH_ARM OR CLR_CMAKE_TARGET_ARCH_ARM64) # Don't display the output header when building RC files. 
add_compile_options($<$<COMPILE_LANGUAGE:RC>:/nologo>) endif (MSVC) if(CLR_CMAKE_ENABLE_CODE_COVERAGE) if(CLR_CMAKE_HOST_UNIX) string(TOUPPER ${CMAKE_BUILD_TYPE} UPPERCASE_CMAKE_BUILD_TYPE) if(NOT UPPERCASE_CMAKE_BUILD_TYPE STREQUAL DEBUG) message( WARNING "Code coverage results with an optimised (non-Debug) build may be misleading" ) endif(NOT UPPERCASE_CMAKE_BUILD_TYPE STREQUAL DEBUG) add_compile_options(-fprofile-arcs) add_compile_options(-ftest-coverage) add_linker_flag(--coverage) else() message(FATAL_ERROR "Code coverage builds not supported on current platform") endif(CLR_CMAKE_HOST_UNIX) endif(CLR_CMAKE_ENABLE_CODE_COVERAGE) if (CMAKE_GENERATOR MATCHES "(Makefile|Ninja)") set(CMAKE_RC_CREATE_SHARED_LIBRARY "${CMAKE_CXX_CREATE_SHARED_LIBRARY}") endif() # Ensure other tools are present if (CLR_CMAKE_HOST_WIN32) if(CLR_CMAKE_HOST_ARCH_ARM) # Explicitly specify the assembler to be used for Arm32 compile file(TO_CMAKE_PATH "$ENV{VCToolsInstallDir}\\bin\\HostX86\\arm\\armasm.exe" CMAKE_ASM_COMPILER) set(CMAKE_ASM_MASM_COMPILER ${CMAKE_ASM_COMPILER}) message("CMAKE_ASM_MASM_COMPILER explicitly set to: ${CMAKE_ASM_MASM_COMPILER}") # Enable generic assembly compilation to avoid CMake generate VS proj files that explicitly # use ml[64].exe as the assembler. enable_language(ASM) set(CMAKE_ASM_COMPILE_OPTIONS_MSVC_RUNTIME_LIBRARY_MultiThreaded "") set(CMAKE_ASM_COMPILE_OPTIONS_MSVC_RUNTIME_LIBRARY_MultiThreadedDLL "") set(CMAKE_ASM_COMPILE_OPTIONS_MSVC_RUNTIME_LIBRARY_MultiThreadedDebug "") set(CMAKE_ASM_COMPILE_OPTIONS_MSVC_RUNTIME_LIBRARY_MultiThreadedDebugDLL "") set(CMAKE_ASM_COMPILE_OBJECT "<CMAKE_ASM_COMPILER> -g <INCLUDES> <FLAGS> -o <OBJECT> <SOURCE>") elseif(CLR_CMAKE_HOST_ARCH_ARM64) # Explicitly specify the assembler to be used for Arm64 compile file(TO_CMAKE_PATH "$ENV{VCToolsInstallDir}\\bin\\HostX86\\arm64\\armasm64.exe" CMAKE_ASM_COMPILER) set(CMAKE_ASM_MASM_COMPILER ${CMAKE_ASM_COMPILER}) message("CMAKE_ASM_MASM_COMPILER explicitly set to: ${CMAKE_ASM_MASM_COMPILER}") # Enable generic assembly compilation to avoid CMake generate VS proj files that explicitly # use ml[64].exe as the assembler. enable_language(ASM) set(CMAKE_ASM_COMPILE_OPTIONS_MSVC_RUNTIME_LIBRARY_MultiThreaded "") set(CMAKE_ASM_COMPILE_OPTIONS_MSVC_RUNTIME_LIBRARY_MultiThreadedDLL "") set(CMAKE_ASM_COMPILE_OPTIONS_MSVC_RUNTIME_LIBRARY_MultiThreadedDebug "") set(CMAKE_ASM_COMPILE_OPTIONS_MSVC_RUNTIME_LIBRARY_MultiThreadedDebugDLL "") set(CMAKE_ASM_COMPILE_OBJECT "<CMAKE_ASM_COMPILER> -g <INCLUDES> <FLAGS> -o <OBJECT> <SOURCE>") else() enable_language(ASM_MASM) set(CMAKE_ASM_MASM_COMPILE_OPTIONS_MSVC_RUNTIME_LIBRARY_MultiThreaded "") set(CMAKE_ASM_MASM_COMPILE_OPTIONS_MSVC_RUNTIME_LIBRARY_MultiThreadedDLL "") set(CMAKE_ASM_MASM_COMPILE_OPTIONS_MSVC_RUNTIME_LIBRARY_MultiThreadedDebug "") set(CMAKE_ASM_MASM_COMPILE_OPTIONS_MSVC_RUNTIME_LIBRARY_MultiThreadedDebugDLL "") endif() # Ensure that MC is present find_program(MC mc) if (MC STREQUAL "MC-NOTFOUND") message(FATAL_ERROR "MC not found") endif() elseif (NOT CLR_CMAKE_HOST_BROWSER) enable_language(ASM) endif(CLR_CMAKE_HOST_WIN32)
include(${CMAKE_CURRENT_LIST_DIR}/configuretools.cmake) # Set initial flags for each configuration set(CMAKE_EXPORT_COMPILE_COMMANDS ON) set(CMAKE_C_STANDARD 11) set(CMAKE_C_STANDARD_REQUIRED ON) set(CMAKE_CXX_STANDARD 11) set(CMAKE_CXX_STANDARD_REQUIRED ON) include(CheckCCompilerFlag) include(CheckCXXCompilerFlag) # "configureoptimization.cmake" must be included after CLR_CMAKE_HOST_UNIX has been set. include(${CMAKE_CURRENT_LIST_DIR}/configureoptimization.cmake) #----------------------------------------------------- # Initialize Cmake compiler flags and other variables #----------------------------------------------------- if (CLR_CMAKE_HOST_UNIX) add_compile_options(-g) add_compile_options(-Wall) if (CMAKE_CXX_COMPILER_ID MATCHES "Clang") add_compile_options(-Wno-null-conversion) else() add_compile_options($<$<COMPILE_LANGUAGE:CXX>:-Werror=conversion-null>) endif() endif() if (CMAKE_CONFIGURATION_TYPES) # multi-configuration generator? set(CMAKE_CONFIGURATION_TYPES "Debug;Checked;Release;RelWithDebInfo" CACHE STRING "" FORCE) endif (CMAKE_CONFIGURATION_TYPES) set(CMAKE_C_FLAGS_CHECKED "") set(CMAKE_CXX_FLAGS_CHECKED "") set(CMAKE_EXE_LINKER_FLAGS_CHECKED "") set(CMAKE_SHARED_LINKER_FLAGS_CHECKED "") set(CMAKE_SHARED_LINKER_FLAGS_DEBUG "") set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "") set(CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO "") set(CMAKE_EXE_LINKER_FLAGS_DEBUG "") set(CMAKE_EXE_LINKER_FLAGS_DEBUG "") set(CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "") add_compile_definitions("$<$<CONFIG:DEBUG>:DEBUG;_DEBUG;_DBG;URTBLDENV_FRIENDLY=Debug;BUILDENV_DEBUG=1>") add_compile_definitions("$<$<CONFIG:CHECKED>:DEBUG;_DEBUG;_DBG;URTBLDENV_FRIENDLY=Checked;BUILDENV_CHECKED=1>") add_compile_definitions("$<$<OR:$<CONFIG:RELEASE>,$<CONFIG:RELWITHDEBINFO>>:NDEBUG;URTBLDENV_FRIENDLY=Retail>") if (MSVC) add_linker_flag(/guard:cf) # Linker flags # set (WINDOWS_SUBSYSTEM_VERSION 6.01) if (CLR_CMAKE_HOST_ARCH_ARM) set(WINDOWS_SUBSYSTEM_VERSION 6.02) #windows subsystem - arm minimum is 6.02 elseif(CLR_CMAKE_HOST_ARCH_ARM64) set(WINDOWS_SUBSYSTEM_VERSION 6.03) #windows subsystem - arm64 minimum is 6.03 endif () #Do not create Side-by-Side Assembly Manifest set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /MANIFEST:NO") # can handle addresses larger than 2 gigabytes set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /LARGEADDRESSAWARE") #shrink pdb size set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /PDBCOMPRESS") set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /DEBUG") set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /IGNORE:4197,4013,4254,4070,4221") set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /SUBSYSTEM:WINDOWS,${WINDOWS_SUBSYSTEM_VERSION}") set(CMAKE_STATIC_LINKER_FLAGS "${CMAKE_STATIC_LINKER_FLAGS} /IGNORE:4221") set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /DEBUG") set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /PDBCOMPRESS") set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /STACK:1572864") # Checked build specific flags add_linker_flag(/INCREMENTAL:NO CHECKED) # prevent "warning LNK4075: ignoring '/INCREMENTAL' due to '/OPT:REF' specification" add_linker_flag(/OPT:REF CHECKED) add_linker_flag(/OPT:NOICF CHECKED) # Release build specific flags add_linker_flag(/LTCG RELEASE) add_linker_flag(/OPT:REF RELEASE) add_linker_flag(/OPT:ICF RELEASE) add_linker_flag(/INCREMENTAL:NO RELEASE) set(CMAKE_STATIC_LINKER_FLAGS_RELEASE "${CMAKE_STATIC_LINKER_FLAGS_RELEASE} /LTCG") # ReleaseWithDebugInfo build specific flags 
add_linker_flag(/LTCG RELWITHDEBINFO) add_linker_flag(/OPT:REF RELWITHDEBINFO) add_linker_flag(/OPT:ICF RELWITHDEBINFO) set(CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO} /LTCG") # Force uCRT to be dynamically linked for Release build add_linker_flag(/NODEFAULTLIB:libucrt.lib RELEASE) add_linker_flag(/DEFAULTLIB:ucrt.lib RELEASE) elseif (CLR_CMAKE_HOST_UNIX) # Set the values to display when interactively configuring CMAKE_BUILD_TYPE set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "DEBUG;CHECKED;RELEASE;RELWITHDEBINFO") # Use uppercase CMAKE_BUILD_TYPE for the string comparisons below string(TOUPPER ${CMAKE_BUILD_TYPE} UPPERCASE_CMAKE_BUILD_TYPE) set(CLR_SANITIZE_CXX_OPTIONS "") set(CLR_SANITIZE_LINK_OPTIONS "") # set the CLANG sanitizer flags for debug build if(UPPERCASE_CMAKE_BUILD_TYPE STREQUAL DEBUG OR UPPERCASE_CMAKE_BUILD_TYPE STREQUAL CHECKED) # obtain settings from running enablesanitizers.sh string(FIND "$ENV{DEBUG_SANITIZERS}" "asan" __ASAN_POS) string(FIND "$ENV{DEBUG_SANITIZERS}" "ubsan" __UBSAN_POS) if ((${__ASAN_POS} GREATER -1) OR (${__UBSAN_POS} GREATER -1)) list(APPEND CLR_SANITIZE_CXX_OPTIONS -fsanitize-blacklist=${CMAKE_CURRENT_SOURCE_DIR}/sanitizerblacklist.txt) set (CLR_CXX_SANITIZERS "") set (CLR_LINK_SANITIZERS "") if (${__ASAN_POS} GREATER -1) list(APPEND CLR_CXX_SANITIZERS address) list(APPEND CLR_LINK_SANITIZERS address) set(CLR_SANITIZE_CXX_FLAGS "${CLR_SANITIZE_CXX_FLAGS}address,") set(CLR_SANITIZE_LINK_FLAGS "${CLR_SANITIZE_LINK_FLAGS}address,") add_definitions(-DHAS_ASAN) message("Address Sanitizer (asan) enabled") endif () if (${__UBSAN_POS} GREATER -1) # all sanitizier flags are enabled except alignment (due to heavy use of __unaligned modifier) list(APPEND CLR_CXX_SANITIZERS "bool" bounds enum float-cast-overflow float-divide-by-zero "function" integer nonnull-attribute null object-size "return" returns-nonnull-attribute shift unreachable vla-bound vptr) list(APPEND CLR_LINK_SANITIZERS undefined) message("Undefined Behavior Sanitizer (ubsan) enabled") endif () list(JOIN CLR_CXX_SANITIZERS "," CLR_CXX_SANITIZERS_OPTIONS) list(APPEND CLR_SANITIZE_CXX_OPTIONS "-fsanitize=${CLR_CXX_SANITIZERS_OPTIONS}") list(JOIN CLR_LINK_SANITIZERS "," CLR_LINK_SANITIZERS_OPTIONS) list(APPEND CLR_SANITIZE_LINK_OPTIONS "-fsanitize=${CLR_LINK_SANITIZERS_OPTIONS}") # -fdata-sections -ffunction-sections: each function has own section instead of one per .o file (needed for --gc-sections) # -O1: optimization level used instead of -O0 to avoid compile error "invalid operand for inline asm constraint" add_compile_options("$<$<OR:$<CONFIG:DEBUG>,$<CONFIG:CHECKED>>:${CLR_SANITIZE_CXX_OPTIONS};-fdata-sections;--ffunction-sections;-O1>") add_linker_flag("${CLR_SANITIZE_LINK_OPTIONS}" DEBUG CHECKED) # -Wl and --gc-sections: drop unused sections\functions (similar to Windows /Gy function-level-linking) add_linker_flag("-Wl,--gc-sections" DEBUG CHECKED) endif () endif(UPPERCASE_CMAKE_BUILD_TYPE STREQUAL DEBUG OR UPPERCASE_CMAKE_BUILD_TYPE STREQUAL CHECKED) if(CLR_CMAKE_HOST_BROWSER) # The emscripten build has additional warnings so -Werror breaks add_compile_options(-Wno-unused-parameter) add_compile_options(-Wno-alloca) add_compile_options(-Wno-implicit-int-float-conversion) endif() endif(MSVC) # CLR_ADDITIONAL_LINKER_FLAGS - used for passing additional arguments to linker # CLR_ADDITIONAL_COMPILER_OPTIONS - used for passing additional arguments to compiler # # For example: # ./build-native.sh cmakeargs "-DCLR_ADDITIONAL_COMPILER_OPTIONS=<...>" 
cmakeargs "-DCLR_ADDITIONAL_LINKER_FLAGS=<...>" # if(CLR_CMAKE_HOST_UNIX) foreach(ADDTL_LINKER_FLAG ${CLR_ADDITIONAL_LINKER_FLAGS}) add_linker_flag(${ADDTL_LINKER_FLAG}) endforeach() endif(CLR_CMAKE_HOST_UNIX) if(CLR_CMAKE_HOST_LINUX) add_compile_options($<$<COMPILE_LANGUAGE:ASM>:-Wa,--noexecstack>) add_linker_flag(-Wl,--build-id=sha1) add_linker_flag(-Wl,-z,relro,-z,now) elseif(CLR_CMAKE_HOST_FREEBSD) add_compile_options($<$<COMPILE_LANGUAGE:ASM>:-Wa,--noexecstack>) add_linker_flag("-Wl,--build-id=sha1") elseif(CLR_CMAKE_HOST_SUNOS) add_compile_options($<$<COMPILE_LANGUAGE:ASM>:-Wa,--noexecstack>) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fstack-protector") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fstack-protector") add_definitions(-D__EXTENSIONS__ -D_XPG4_2 -D_POSIX_PTHREAD_SEMANTICS) elseif(CLR_CMAKE_HOST_OSX AND NOT CLR_CMAKE_HOST_IOS AND NOT CLR_CMAKE_HOST_TVOS) add_definitions(-D_XOPEN_SOURCE) add_linker_flag("-Wl,-bind_at_load") endif() #------------------------------------ # Definitions (for platform) #----------------------------------- if (CLR_CMAKE_HOST_ARCH_AMD64) set(ARCH_HOST_NAME x64) add_definitions(-DHOST_AMD64 -DHOST_64BIT) elseif (CLR_CMAKE_HOST_ARCH_I386) set(ARCH_HOST_NAME x86) add_definitions(-DHOST_X86) elseif (CLR_CMAKE_HOST_ARCH_ARM) set(ARCH_HOST_NAME arm) add_definitions(-DHOST_ARM) elseif (CLR_CMAKE_HOST_ARCH_ARMV6) set(ARCH_HOST_NAME armv6) add_definitions(-DHOST_ARMV6) elseif (CLR_CMAKE_HOST_ARCH_ARM64) set(ARCH_HOST_NAME arm64) add_definitions(-DHOST_ARM64 -DHOST_64BIT) elseif (CLR_CMAKE_HOST_ARCH_LOONGARCH64) set(ARCH_HOST_NAME loongarch64) add_definitions(-DHOST_LOONGARCH64 -DHOST_64BIT) elseif (CLR_CMAKE_HOST_ARCH_S390X) set(ARCH_HOST_NAME s390x) add_definitions(-DHOST_S390X -DHOST_64BIT -DBIGENDIAN) elseif (CLR_CMAKE_HOST_ARCH_WASM) set(ARCH_HOST_NAME wasm) add_definitions(-DHOST_WASM -DHOST_32BIT=1) elseif (CLR_CMAKE_HOST_ARCH_MIPS64) set(ARCH_HOST_NAME mips64) add_definitions(-DHOST_MIPS64 -DHOST_64BIT=1) else () clr_unknown_arch() endif () if (CLR_CMAKE_HOST_UNIX) if(CLR_CMAKE_HOST_LINUX) if(CLR_CMAKE_HOST_UNIX_AMD64) message("Detected Linux x86_64") elseif(CLR_CMAKE_HOST_UNIX_ARM) message("Detected Linux ARM") elseif(CLR_CMAKE_HOST_UNIX_ARMV6) message("Detected Linux ARMv6") elseif(CLR_CMAKE_HOST_UNIX_ARM64) message("Detected Linux ARM64") elseif(CLR_CMAKE_HOST_UNIX_LOONGARCH64) message("Detected Linux LOONGARCH64") elseif(CLR_CMAKE_HOST_UNIX_X86) message("Detected Linux i686") elseif(CLR_CMAKE_HOST_UNIX_S390X) message("Detected Linux s390x") else() clr_unknown_arch() endif() endif(CLR_CMAKE_HOST_LINUX) endif(CLR_CMAKE_HOST_UNIX) if (CLR_CMAKE_HOST_UNIX) add_definitions(-DHOST_UNIX) if(CLR_CMAKE_HOST_OSX OR CLR_CMAKE_HOST_MACCATALYST) add_definitions(-DHOST_OSX) if(CLR_CMAKE_HOST_UNIX_AMD64) message("Detected OSX x86_64") elseif(CLR_CMAKE_HOST_UNIX_ARM64) message("Detected OSX ARM64") else() clr_unknown_arch() endif() elseif(CLR_CMAKE_HOST_FREEBSD) message("Detected FreeBSD amd64") elseif(CLR_CMAKE_HOST_NETBSD) message("Detected NetBSD amd64") elseif(CLR_CMAKE_HOST_SUNOS) message("Detected SunOS amd64") endif(CLR_CMAKE_HOST_OSX OR CLR_CMAKE_HOST_MACCATALYST) endif(CLR_CMAKE_HOST_UNIX) if (CLR_CMAKE_HOST_WIN32) add_definitions(-DHOST_WINDOWS) # Define the CRT lib references that link into Desktop imports set(STATIC_MT_CRT_LIB "libcmt$<$<OR:$<CONFIG:Debug>,$<CONFIG:Checked>>:d>.lib") set(STATIC_MT_VCRT_LIB "libvcruntime$<$<OR:$<CONFIG:Debug>,$<CONFIG:Checked>>:d>.lib") set(STATIC_MT_CPP_LIB "libcpmt$<$<OR:$<CONFIG:Debug>,$<CONFIG:Checked>>:d>.lib") 
endif(CLR_CMAKE_HOST_WIN32) # Unconditionally define _FILE_OFFSET_BITS as 64 on all platforms. add_definitions(-D_FILE_OFFSET_BITS=64) # Architecture specific files folder name if (CLR_CMAKE_TARGET_ARCH_AMD64) set(ARCH_SOURCES_DIR amd64) set(ARCH_TARGET_NAME x64) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_ARCH>>>:TARGET_AMD64>) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_ARCH>>>:TARGET_64BIT>) elseif (CLR_CMAKE_TARGET_ARCH_ARM64) set(ARCH_SOURCES_DIR arm64) set(ARCH_TARGET_NAME arm64) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_ARCH>>>:TARGET_ARM64>) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_ARCH>>>:TARGET_64BIT>) elseif (CLR_CMAKE_TARGET_ARCH_ARM) set(ARCH_SOURCES_DIR arm) set(ARCH_TARGET_NAME arm) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_ARCH>>>:TARGET_ARM>) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_ARCH>>>:TARGET_32BIT>) elseif (CLR_CMAKE_TARGET_ARCH_ARMV6) set(ARCH_SOURCES_DIR arm) set(ARCH_TARGET_NAME armv6) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_ARCH>>>:TARGET_ARM>) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_ARCH>>>:TARGET_ARMV6>) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_ARCH>>>:TARGET_32BIT>) elseif (CLR_CMAKE_TARGET_ARCH_I386) set(ARCH_TARGET_NAME x86) set(ARCH_SOURCES_DIR i386) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_ARCH>>>:TARGET_X86>) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_ARCH>>>:TARGET_32BIT>) elseif (CLR_CMAKE_TARGET_ARCH_LOONGARCH64) set(ARCH_TARGET_NAME loongarch64) set(ARCH_SOURCES_DIR loongarch64) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_ARCH>>>:TARGET_LOONGARCH64>) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_ARCH>>>:TARGET_64BIT>) elseif (CLR_CMAKE_TARGET_ARCH_S390X) set(ARCH_TARGET_NAME s390x) set(ARCH_SOURCES_DIR s390x) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_ARCH>>>:TARGET_S390X>) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_ARCH>>>:TARGET_64BIT>) elseif (CLR_CMAKE_TARGET_ARCH_WASM) set(ARCH_TARGET_NAME wasm) set(ARCH_SOURCES_DIR wasm) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_ARCH>>>:TARGET_WASM>) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_ARCH>>>:TARGET_32BIT>) elseif (CLR_CMAKE_TARGET_ARCH_MIPS64) set(ARCH_TARGET_NAME mips64) set(ARCH_SOURCES_DIR mips64) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_ARCH>>>:TARGET_MIPS64>) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_ARCH>>>:TARGET_64BIT>) else () clr_unknown_arch() endif () #-------------------------------------- # Compile Options #-------------------------------------- if (CLR_CMAKE_HOST_UNIX) # Disable frame pointer optimizations so profilers can get better call stacks add_compile_options(-fno-omit-frame-pointer) # The -fms-extensions enable the stuff like __if_exists, __declspec(uuid()), etc. 
add_compile_options(-fms-extensions) #-fms-compatibility Enable full Microsoft Visual C++ compatibility #-fms-extensions Accept some non-standard constructs supported by the Microsoft compiler # Make signed arithmetic overflow of addition, subtraction, and multiplication wrap around # using twos-complement representation (this is normally undefined according to the C++ spec). add_compile_options(-fwrapv) if(CLR_CMAKE_HOST_OSX OR CLR_CMAKE_HOST_MACCATALYST) # We cannot enable "stack-protector-strong" on OS X due to a bug in clang compiler (current version 7.0.2) add_compile_options(-fstack-protector) elseif(NOT CLR_CMAKE_HOST_BROWSER) check_c_compiler_flag(-fstack-protector-strong COMPILER_SUPPORTS_F_STACK_PROTECTOR_STRONG) if (COMPILER_SUPPORTS_F_STACK_PROTECTOR_STRONG) add_compile_options(-fstack-protector-strong) endif() endif(CLR_CMAKE_HOST_OSX OR CLR_CMAKE_HOST_MACCATALYST) # Suppress warnings-as-errors in release branches to reduce servicing churn if (PRERELEASE) add_compile_options(-Werror) endif(PRERELEASE) # Disabled common warnings add_compile_options(-Wno-unused-variable) add_compile_options(-Wno-unused-value) add_compile_options(-Wno-unused-function) add_compile_options(-Wno-tautological-compare) add_compile_options(-Wno-unknown-pragmas) # Explicitly enabled warnings check_c_compiler_flag(-Wimplicit-fallthrough COMPILER_SUPPORTS_W_IMPLICIT_FALLTHROUGH) if (COMPILER_SUPPORTS_W_IMPLICIT_FALLTHROUGH) add_compile_options(-Wimplicit-fallthrough) endif() #These seem to indicate real issues add_compile_options($<$<COMPILE_LANGUAGE:CXX>:-Wno-invalid-offsetof>) add_compile_options(-Wno-unused-but-set-variable) # Turn off floating point expression contraction because it is considered a value changing # optimization in the IEEE 754 specification and is therefore considered unsafe. add_compile_options(-ffp-contract=off) if (CMAKE_C_COMPILER_ID MATCHES "Clang") add_compile_options(-Wno-unknown-warning-option) # The -ferror-limit is helpful during the porting, it makes sure the compiler doesn't stop # after hitting just about 20 errors. add_compile_options(-ferror-limit=4096) # Disabled warnings add_compile_options(-Wno-unused-private-field) # There are constants of type BOOL used in a condition. But BOOL is defined as int # and so the compiler thinks that there is a mistake. add_compile_options(-Wno-constant-logical-operand) # We use pshpack1/2/4/8.h and poppack.h headers to set and restore packing. However # clang 6.0 complains when the packing change lifetime is not contained within # a header file. add_compile_options(-Wno-pragma-pack) # The following warning indicates that an attribute __attribute__((__ms_struct__)) was applied # to a struct or a class that has virtual members or a base class. In that case, clang # may not generate the same object layout as MSVC. 
add_compile_options(-Wno-incompatible-ms-struct) add_compile_options(-Wno-reserved-identifier) else() add_compile_options(-Wno-uninitialized) add_compile_options(-Wno-strict-aliasing) add_compile_options(-Wno-array-bounds) add_compile_options($<$<COMPILE_LANGUAGE:CXX>:-Wno-class-memaccess>) add_compile_options($<$<COMPILE_LANGUAGE:CXX>:-Wno-misleading-indentation>) add_compile_options($<$<COMPILE_LANGUAGE:CXX>:-Wno-stringop-overflow>) add_compile_options($<$<COMPILE_LANGUAGE:CXX>:-Wno-stringop-truncation>) add_compile_options($<$<COMPILE_LANGUAGE:CXX>:-Wno-placement-new>) if (CMAKE_CXX_COMPILER_ID) check_cxx_compiler_flag(-faligned-new COMPILER_SUPPORTS_F_ALIGNED_NEW) if (COMPILER_SUPPORTS_F_ALIGNED_NEW) add_compile_options($<$<COMPILE_LANGUAGE:CXX>:-faligned-new>) endif() endif() endif() # Some architectures (e.g., ARM) assume char type is unsigned while CoreCLR assumes char is signed # as x64 does. It has been causing issues in ARM (https://github.com/dotnet/runtime/issues/5778) add_compile_options(-fsigned-char) # We mark the function which needs exporting with DLLEXPORT add_compile_options(-fvisibility=hidden) # Specify the minimum supported version of macOS # Mac Catalyst needs a special CFLAG, exclusive with mmacosx-version-min if(CLR_CMAKE_HOST_MACCATALYST) # Somewhere between CMake 3.17 and 3.19.4, it became impossible to not pass # a value for mmacosx-version-min (blank CMAKE_OSX_DEPLOYMENT_TARGET gets # replaced with a default value, and always gets expanded to an OS version. # https://gitlab.kitware.com/cmake/cmake/-/issues/20132 # We need to disable the warning that -tagret replaces -mmacosx-version-min set(DISABLE_OVERRIDING_MIN_VERSION_ERROR -Wno-overriding-t-option) add_link_options(-Wno-overriding-t-option) if(CLR_CMAKE_HOST_ARCH_ARM64) set(MACOS_VERSION_MIN_FLAGS "-target arm64-apple-ios14.2-macabi") add_link_options(-target arm64-apple-ios14.2-macabi) elseif(CLR_CMAKE_HOST_ARCH_AMD64) set(MACOS_VERSION_MIN_FLAGS "-target x86_64-apple-ios13.5-macabi") add_link_options(-target x86_64-apple-ios13.5-macabi) else() clr_unknown_arch() endif() # These options are intentionally set using the CMAKE_XXX_FLAGS instead of # add_compile_options so that they take effect on the configuration functions # in various configure.cmake files. set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${MACOS_VERSION_MIN_FLAGS} ${DISABLE_OVERRIDING_MIN_VERSION_ERROR}") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${MACOS_VERSION_MIN_FLAGS} ${DISABLE_OVERRIDING_MIN_VERSION_ERROR}") set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} ${MACOS_VERSION_MIN_FLAGS} ${DISABLE_OVERRIDING_MIN_VERSION_ERROR}") elseif(CLR_CMAKE_HOST_OSX) if(CLR_CMAKE_HOST_ARCH_ARM64) set(CMAKE_OSX_DEPLOYMENT_TARGET "11.0") add_compile_options(-arch arm64) elseif(CLR_CMAKE_HOST_ARCH_AMD64) set(CMAKE_OSX_DEPLOYMENT_TARGET "10.14") add_compile_options(-arch x86_64) else() clr_unknown_arch() endif() endif(CLR_CMAKE_HOST_MACCATALYST) endif(CLR_CMAKE_HOST_UNIX) if(CLR_CMAKE_TARGET_UNIX) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_OS>>>:TARGET_UNIX>) # Contracts are disabled on UNIX. 
add_definitions(-DDISABLE_CONTRACTS) if(CLR_CMAKE_TARGET_OSX AND NOT CLR_CMAKE_TARGET_IOS AND NOT CLR_CMAKE_TARGET_TVOS) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_OS>>>:TARGET_OSX>) elseif(CLR_CMAKE_TARGET_FREEBSD) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_OS>>>:TARGET_FREEBSD>) elseif(CLR_CMAKE_TARGET_ANDROID) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_OS>>>:TARGET_ANDROID>) elseif(CLR_CMAKE_TARGET_LINUX) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_OS>>>:TARGET_LINUX>) if(CLR_CMAKE_TARGET_LINUX_MUSL) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_OS>>>:TARGET_LINUX_MUSL>) endif() elseif(CLR_CMAKE_TARGET_NETBSD) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_OS>>>:TARGET_NETBSD>) elseif(CLR_CMAKE_TARGET_SUNOS) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_OS>>>:TARGET_SUNOS>) if(CLR_CMAKE_TARGET_OS_ILLUMOS) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_OS>>>:TARGET_ILLUMOS>) endif() endif() else(CLR_CMAKE_TARGET_UNIX) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_OS>>>:TARGET_WINDOWS>) endif(CLR_CMAKE_TARGET_UNIX) if(CLR_CMAKE_HOST_UNIX_ARM) if (NOT DEFINED CLR_ARM_FPU_TYPE) set(CLR_ARM_FPU_TYPE vfpv3) endif(NOT DEFINED CLR_ARM_FPU_TYPE) # Because we don't use CMAKE_C_COMPILER/CMAKE_CXX_COMPILER to use clang # we have to set the triple by adding a compiler argument add_compile_options(-mthumb) add_compile_options(-mfpu=${CLR_ARM_FPU_TYPE}) if (NOT DEFINED CLR_ARM_FPU_CAPABILITY) set(CLR_ARM_FPU_CAPABILITY 0x7) endif(NOT DEFINED CLR_ARM_FPU_CAPABILITY) add_definitions(-DCLR_ARM_FPU_CAPABILITY=${CLR_ARM_FPU_CAPABILITY}) add_compile_options(-march=armv7-a) if(ARM_SOFTFP) add_definitions(-DARM_SOFTFP) add_compile_options(-mfloat-abi=softfp) endif(ARM_SOFTFP) endif(CLR_CMAKE_HOST_UNIX_ARM) if(CLR_CMAKE_HOST_UNIX_ARMV6) add_compile_options(-mfpu=vfp) add_definitions(-DCLR_ARM_FPU_CAPABILITY=0x0) add_compile_options(-march=armv6zk) add_compile_options(-mcpu=arm1176jzf-s) add_compile_options(-mfloat-abi=hard) endif(CLR_CMAKE_HOST_UNIX_ARMV6) if(CLR_CMAKE_HOST_UNIX_X86) add_compile_options(-msse2) endif() if(CLR_CMAKE_HOST_UNIX) add_compile_options(${CLR_ADDITIONAL_COMPILER_OPTIONS}) endif(CLR_CMAKE_HOST_UNIX) if (MSVC) # Compile options for targeting windows add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/nologo>) # Suppress Startup Banner # /W3 is added by default by CMake, so remove it string(REPLACE "/W3" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") string(REPLACE "/W3" "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}") # [[! Microsoft.Security.SystemsADM.10086 !]] - SDL required warnings # set default warning level to 4 but allow targets to override it. 
add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/W$<GENEX_EVAL:$<IF:$<BOOL:$<TARGET_PROPERTY:MSVC_WARNING_LEVEL>>,$<TARGET_PROPERTY:MSVC_WARNING_LEVEL>,4>>>) add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/WX>) # treat warnings as errors add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/Oi>) # enable intrinsics add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/Oy->) # disable suppressing of the creation of frame pointers on the call stack for quicker function calls add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/Gm->) # disable minimal rebuild add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/Zp8>) # pack structs on 8-byte boundary add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/Gy>) # separate functions for linker add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/GS>) # Explicitly enable the buffer security checks add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/fp:precise>) # Enable precise floating point # disable C++ RTTI # /GR is added by default by CMake, so remove it manually. string(REPLACE "/GR " " " CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /GR-") add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/FC>) # use full pathnames in diagnostics add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/MP>) # Build with Multiple Processes (number of processes equal to the number of processors) add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/Zm200>) # Specify Precompiled Header Memory Allocation Limit of 150MB add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/Zc:strictStrings>) # Disable string-literal to char* or wchar_t* conversion add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/Zc:wchar_t>) # wchar_t is a built-in type. add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/Zc:inline>) # All inline functions must have their definition available in the current translation unit. add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/Zc:forScope>) # Enforce standards-compliant for scope. 
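# Illustrative sketch, not part of the original file: the /W$<GENEX_EVAL:...> expression above
# compiles at /W4 by default but consults the custom MSVC_WARNING_LEVEL target property first,
# so an individual target can opt down while its warnings are being cleaned up.
# A hypothetical override would look like:
#
#   add_library(legacy_component STATIC legacy.cpp)                                  # hypothetical target
#   set_target_properties(legacy_component PROPERTIES MSVC_WARNING_LEVEL 3)          # build just this target at /W3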
# Disable Warnings: add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4065>) # switch statement contains 'default' but no 'case' labels add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4100>) # 'identifier' : unreferenced formal parameter add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4127>) # conditional expression is constant add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4131>) # 'function' : uses old-style declarator add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4189>) # local variable is initialized but not referenced add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4200>) # nonstandard extension used : zero-sized array in struct/union add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4201>) # nonstandard extension used : nameless struct/union add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4206>) # nonstandard extension used : translation unit is empty add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4239>) # nonstandard extension used : 'token' : conversion from 'type' to 'type' add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4244>) # conversion from 'type1' to 'type2', possible loss of data add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4245>) # conversion from 'type1' to 'type2', signed/unsigned mismatch add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4291>) # no matching operator delete found; memory will not be freed if initialization throws an exception add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4310>) # cast truncates constant value add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4324>) # 'struct_name' : structure was padded due to __declspec(align()) add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4366>) # The result of the unary 'operator' operator may be unaligned add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4456>) # declaration of 'identifier' hides previous local declaration add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4457>) # declaration of 'identifier' hides function parameter add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4458>) # declaration of 'identifier' hides class member add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4459>) # declaration of 'identifier' hides global declaration add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4463>) # overflow; assigning value to bit-field that can only hold values from low_value to high_value add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4505>) # unreferenced function with internal linkage has been removed add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4611>) # interaction between 'function' and C++ object destruction is non-portable add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4701>) # potentially uninitialized local variable 'var' used add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4702>) # unreachable code add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4703>) # potentially uninitialized local pointer variable 'var' used add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4706>) # assignment within conditional expression add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4733>) # Inline asm assigning to 'FS:0' : handler not registered as safe handler add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4815>) # 'var': zero-sized array in stack object will have no elements (unless the object is an aggregate that has been aggregate initialized) add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4838>) # conversion from 'type_1' to 'type_2' requires a narrowing conversion add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4918>) # 
'character' : invalid character in pragma optimization list add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4960>) # 'function' is too big to be profiled add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4961>) # No profile data was merged into '.pgd file', profile-guided optimizations disabled add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd5105>) # macro expansion producing 'defined' has undefined behavior add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd5205>) # delete of an abstract class 'type-name' that has a non-virtual destructor results in undefined behavior # Treat Warnings as Errors: add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/we4007>) # 'main' : must be __cdecl. add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/we4013>) # 'function' undefined - assuming extern returning int. add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/we4102>) # "'%$S' : unreferenced label". add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/we4551>) # Function call missing argument list. add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/we4700>) # Local used w/o being initialized. add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/we4640>) # 'instance' : construction of local static object is not thread-safe add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/we4806>) # Unsafe operation involving type 'bool'. # Set Warning Level 3: add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/w34092>) # Sizeof returns 'unsigned long'. add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/w34121>) # Structure is sensitive to alignment. add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/w34125>) # Decimal digit in octal sequence. add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/w34130>) # Logical operation on address of string constant. add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/w34132>) # Const object should be initialized. add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/w34212>) # Function declaration used ellipsis. add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/w34530>) # C++ exception handler used, but unwind semantics are not enabled. Specify -GX. add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/w35038>) # data member 'member1' will be initialized after data member 'member2'. # Set Warning Level 4: add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/w44177>) # Pragma data_seg s/b at global scope. add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/Zi>) # enable debugging information add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/ZH:SHA_256>) # use SHA256 for generating hashes of compiler processed source files. add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/source-charset:utf-8>) # Force MSVC to compile source as UTF-8. 
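# Illustrative sketch, not part of the original file: re-enabling one of the warnings suppressed
# above is a matter of deleting its /wdNNNN line once the code is clean; a warning can also be
# forced on as an error regardless of the active warning level (hypothetical choice of C4189):
#
#   # add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/wd4189>)   # <- remove the suppression
#   # add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/we4189>)   # <- optionally promote it to an error everywhere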
if (CLR_CMAKE_HOST_ARCH_I386) add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/Gz>) endif (CLR_CMAKE_HOST_ARCH_I386) add_compile_options($<$<AND:$<COMPILE_LANGUAGE:C,CXX>,$<OR:$<CONFIG:Release>,$<CONFIG:Relwithdebinfo>>>:/GL>) if (CLR_CMAKE_HOST_ARCH_AMD64) # The generator expression in the following command means that the /homeparams option is added only for debug builds for C and C++ source files add_compile_options($<$<AND:$<CONFIG:Debug>,$<COMPILE_LANGUAGE:C,CXX>>:/homeparams>) # Force parameters passed in registers to be written to the stack endif (CLR_CMAKE_HOST_ARCH_AMD64) # enable control-flow-guard support for native components for non-Arm64 builds # Added using variables instead of add_compile_options to let individual projects override it set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /guard:cf") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /guard:cf") # Enable EH-continuation table and CET-compatibility for native components for amd64 builds except for components of the Mono # runtime. Added some switches using variables instead of add_compile_options to let individual projects override it. if (CLR_CMAKE_HOST_ARCH_AMD64 AND NOT CLR_CMAKE_RUNTIME_MONO) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /guard:ehcont") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /guard:ehcont") set(CMAKE_ASM_MASM_FLAGS "${CMAKE_ASM_MASM_FLAGS} /guard:ehcont") add_linker_flag(/guard:ehcont) set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /CETCOMPAT") endif (CLR_CMAKE_HOST_ARCH_AMD64 AND NOT CLR_CMAKE_RUNTIME_MONO) # Statically linked CRT (libcmt[d].lib, libvcruntime[d].lib and libucrt[d].lib) by default. This is done to avoid # linking in VCRUNTIME140.DLL for a simplified xcopy experience by reducing the dependency on VC REDIST. # # For Release builds, we shall dynamically link into uCRT [ucrtbase.dll] (which is pushed down as a Windows Update on downlevel OS) but # wont do the same for debug/checked builds since ucrtbased.dll is not redistributable and Debug/Checked builds are not # production-time scenarios. set(CMAKE_MSVC_RUNTIME_LIBRARY MultiThreaded$<$<AND:$<OR:$<CONFIG:Debug>,$<CONFIG:Checked>>,$<NOT:$<BOOL:$<TARGET_PROPERTY:DAC_COMPONENT>>>>:Debug>) add_compile_options($<$<COMPILE_LANGUAGE:ASM_MASM>:/ZH:SHA_256>) if (CLR_CMAKE_TARGET_ARCH_ARM OR CLR_CMAKE_TARGET_ARCH_ARM64) # Contracts work too slow on ARM/ARM64 DEBUG/CHECKED. add_definitions(-DDISABLE_CONTRACTS) endif (CLR_CMAKE_TARGET_ARCH_ARM OR CLR_CMAKE_TARGET_ARCH_ARM64) # Don't display the output header when building RC files. 
add_compile_options($<$<COMPILE_LANGUAGE:RC>:/nologo>) endif (MSVC) if(CLR_CMAKE_ENABLE_CODE_COVERAGE) if(CLR_CMAKE_HOST_UNIX) string(TOUPPER ${CMAKE_BUILD_TYPE} UPPERCASE_CMAKE_BUILD_TYPE) if(NOT UPPERCASE_CMAKE_BUILD_TYPE STREQUAL DEBUG) message( WARNING "Code coverage results with an optimised (non-Debug) build may be misleading" ) endif(NOT UPPERCASE_CMAKE_BUILD_TYPE STREQUAL DEBUG) add_compile_options(-fprofile-arcs) add_compile_options(-ftest-coverage) add_linker_flag(--coverage) else() message(FATAL_ERROR "Code coverage builds not supported on current platform") endif(CLR_CMAKE_HOST_UNIX) endif(CLR_CMAKE_ENABLE_CODE_COVERAGE) if (CMAKE_GENERATOR MATCHES "(Makefile|Ninja)") set(CMAKE_RC_CREATE_SHARED_LIBRARY "${CMAKE_CXX_CREATE_SHARED_LIBRARY}") endif() # Ensure other tools are present if (CLR_CMAKE_HOST_WIN32) if(CLR_CMAKE_HOST_ARCH_ARM) # Explicitly specify the assembler to be used for Arm32 compile file(TO_CMAKE_PATH "$ENV{VCToolsInstallDir}\\bin\\HostX86\\arm\\armasm.exe" CMAKE_ASM_COMPILER) set(CMAKE_ASM_MASM_COMPILER ${CMAKE_ASM_COMPILER}) message("CMAKE_ASM_MASM_COMPILER explicitly set to: ${CMAKE_ASM_MASM_COMPILER}") # Enable generic assembly compilation to avoid CMake generate VS proj files that explicitly # use ml[64].exe as the assembler. enable_language(ASM) set(CMAKE_ASM_COMPILE_OPTIONS_MSVC_RUNTIME_LIBRARY_MultiThreaded "") set(CMAKE_ASM_COMPILE_OPTIONS_MSVC_RUNTIME_LIBRARY_MultiThreadedDLL "") set(CMAKE_ASM_COMPILE_OPTIONS_MSVC_RUNTIME_LIBRARY_MultiThreadedDebug "") set(CMAKE_ASM_COMPILE_OPTIONS_MSVC_RUNTIME_LIBRARY_MultiThreadedDebugDLL "") set(CMAKE_ASM_COMPILE_OBJECT "<CMAKE_ASM_COMPILER> -g <INCLUDES> <FLAGS> -o <OBJECT> <SOURCE>") elseif(CLR_CMAKE_HOST_ARCH_ARM64) # Explicitly specify the assembler to be used for Arm64 compile file(TO_CMAKE_PATH "$ENV{VCToolsInstallDir}\\bin\\HostX86\\arm64\\armasm64.exe" CMAKE_ASM_COMPILER) set(CMAKE_ASM_MASM_COMPILER ${CMAKE_ASM_COMPILER}) message("CMAKE_ASM_MASM_COMPILER explicitly set to: ${CMAKE_ASM_MASM_COMPILER}") # Enable generic assembly compilation to avoid CMake generate VS proj files that explicitly # use ml[64].exe as the assembler. enable_language(ASM) set(CMAKE_ASM_COMPILE_OPTIONS_MSVC_RUNTIME_LIBRARY_MultiThreaded "") set(CMAKE_ASM_COMPILE_OPTIONS_MSVC_RUNTIME_LIBRARY_MultiThreadedDLL "") set(CMAKE_ASM_COMPILE_OPTIONS_MSVC_RUNTIME_LIBRARY_MultiThreadedDebug "") set(CMAKE_ASM_COMPILE_OPTIONS_MSVC_RUNTIME_LIBRARY_MultiThreadedDebugDLL "") set(CMAKE_ASM_COMPILE_OBJECT "<CMAKE_ASM_COMPILER> -g <INCLUDES> <FLAGS> -o <OBJECT> <SOURCE>") else() enable_language(ASM_MASM) set(CMAKE_ASM_MASM_COMPILE_OPTIONS_MSVC_RUNTIME_LIBRARY_MultiThreaded "") set(CMAKE_ASM_MASM_COMPILE_OPTIONS_MSVC_RUNTIME_LIBRARY_MultiThreadedDLL "") set(CMAKE_ASM_MASM_COMPILE_OPTIONS_MSVC_RUNTIME_LIBRARY_MultiThreadedDebug "") set(CMAKE_ASM_MASM_COMPILE_OPTIONS_MSVC_RUNTIME_LIBRARY_MultiThreadedDebugDLL "") endif() # Ensure that MC is present find_program(MC mc) if (MC STREQUAL "MC-NOTFOUND") message(FATAL_ERROR "MC not found") endif() elseif (NOT CLR_CMAKE_HOST_BROWSER) enable_language(ASM) endif(CLR_CMAKE_HOST_WIN32)
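# Illustrative sketch, not part of the original file: the add_compile_definitions calls above guard
# the default TARGET_* definitions behind the IGNORE_DEFAULT_TARGET_ARCH / IGNORE_DEFAULT_TARGET_OS
# target properties, so a tool that cross-targets another platform can opt out and supply its own:
#
#   add_executable(cross_tool tool.cpp)                            # hypothetical target
#   set_target_properties(cross_tool PROPERTIES
#       IGNORE_DEFAULT_TARGET_ARCH TRUE   # drop the default TARGET_AMD64 / TARGET_64BIT defines
#       IGNORE_DEFAULT_TARGET_OS   TRUE)  # drop the default TARGET_WINDOWS / TARGET_UNIX defines
#   target_compile_definitions(cross_tool PRIVATE TARGET_ARM64 TARGET_64BIT TARGET_UNIX)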
1
dotnet/runtime
66,474
Update repo CMake configuration to target /W4 by default
Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
AaronRobinsonMSFT
2022-03-10T20:20:31Z
2022-03-11T04:59:21Z
ba5207b0f4054fe8963fc3e87c2bd32907516961
5ce2b9f860f7a84c3059650bb67817d59d8f4953
Update repo CMake configuration to target /W4 by default. Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
./src/coreclr/gc/gc.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // #Overview // // GC automatically manages memory allocated by managed code. // The design doc for GC can be found at docs/design/coreclr/botr/garbage-collection.md // // This file includes both the code for GC and the allocator. The most common // case for a GC to be triggered is from the allocator code. See // code:#try_allocate_more_space where it calls GarbageCollectGeneration. // // Entry points for the allocator are GCHeap::Alloc* which are called by the // allocation helpers in gcscan.cpp // #include "gcpriv.h" #if defined(TARGET_AMD64) && defined(TARGET_WINDOWS) #define USE_VXSORT #else #define USE_INTROSORT #endif #ifdef DACCESS_COMPILE #error this source file should not be compiled with DACCESS_COMPILE! #endif //DACCESS_COMPILE // We just needed a simple random number generator for testing. class gc_rand { public: static uint64_t x; static uint64_t get_rand() { x = (314159269*x+278281) & 0x7FFFFFFF; return x; } // obtain random number in the range 0 .. r-1 static uint64_t get_rand(uint64_t r) { // require r >= 0 uint64_t x = (uint64_t)((get_rand() * r) >> 31); return x; } }; uint64_t gc_rand::x = 0; #if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE) BOOL bgc_heap_walk_for_etw_p = FALSE; #endif //BACKGROUND_GC && FEATURE_EVENT_TRACE #define MAX_PTR ((uint8_t*)(~(ptrdiff_t)0)) #define commit_min_th (16*OS_PAGE_SIZE) #define MIN_SOH_CROSS_GEN_REFS (400) #define MIN_LOH_CROSS_GEN_REFS (800) #ifdef SERVER_GC #define partial_size_th 100 #define num_partial_refs 64 #else //SERVER_GC #define partial_size_th 100 #define num_partial_refs 32 #endif //SERVER_GC #ifdef USE_REGIONS // If the pinned survived is 1+% of the region size, we don't demote. #define demotion_pinned_ratio_th (1) // If the survived / region_size is 90+%, we don't compact this region. #define sip_surv_ratio_th (90) // If the survived due to cards from old generations / region_size is 90+%, // we don't compact this region, also we immediately promote it to gen2. #define sip_old_card_surv_ratio_th (90) #else #define demotion_plug_len_th (6*1024*1024) #endif //USE_REGIONS #ifdef HOST_64BIT #define MARK_STACK_INITIAL_LENGTH 1024 #else #define MARK_STACK_INITIAL_LENGTH 128 #endif // HOST_64BIT #define LOH_PIN_QUEUE_LENGTH 100 #define LOH_PIN_DECAY 10 uint32_t yp_spin_count_unit = 0; size_t loh_size_threshold = LARGE_OBJECT_SIZE; #ifdef GC_CONFIG_DRIVEN int compact_ratio = 0; #endif //GC_CONFIG_DRIVEN // See comments in reset_memory. 
BOOL reset_mm_p = TRUE; #ifdef FEATURE_SVR_GC bool g_built_with_svr_gc = true; #else bool g_built_with_svr_gc = false; #endif // FEATURE_SVR_GC #if defined(BUILDENV_DEBUG) uint8_t g_build_variant = 0; #elif defined(BUILDENV_CHECKED) uint8_t g_build_variant = 1; #else uint8_t g_build_variant = 2; #endif //BUILDENV_DEBUG VOLATILE(int32_t) g_no_gc_lock = -1; #ifdef TRACE_GC const char * const allocation_state_str[] = { "start", "can_allocate", "cant_allocate", "retry_allocate", "try_fit", "try_fit_new_seg", "try_fit_after_cg", "try_fit_after_bgc", "try_free_full_seg_in_bgc", "try_free_after_bgc", "try_seg_end", "acquire_seg", "acquire_seg_after_cg", "acquire_seg_after_bgc", "check_and_wait_for_bgc", "trigger_full_compact_gc", "trigger_ephemeral_gc", "trigger_2nd_ephemeral_gc", "check_retry_seg" }; const char * const msl_take_state_str[] = { "get_large_seg", "bgc_loh_sweep", "wait_bgc", "block_gc", "clr_mem", "clr_large_mem", "t_eph_gc", "t_full_gc", "alloc_small", "alloc_large", "alloc_small_cant", "alloc_large_cant", "try_alloc", "try_budget" }; #endif //TRACE_GC // Keep this in sync with the definition of gc_reason #if (defined(DT_LOG) || defined(TRACE_GC)) static const char* const str_gc_reasons[] = { "alloc_soh", "induced", "lowmem", "empty", "alloc_loh", "oos_soh", "oos_loh", "induced_noforce", "gcstress", "induced_lowmem", "induced_compacting", "lowmemory_host", "pm_full_gc", "lowmemory_host_blocking" }; static const char* const str_gc_pause_modes[] = { "batch", "interactive", "low_latency", "sustained_low_latency", "no_gc" }; static const char* const str_root_kinds[] = { "Stack", "FinalizeQueue", "Handles", "OlderGen", "SizedRef", "Overflow", "DependentHandles", "NewFQ", "Steal", "BGC" }; #endif //DT_LOG || TRACE_GC inline BOOL is_induced (gc_reason reason) { return ((reason == reason_induced) || (reason == reason_induced_noforce) || (reason == reason_lowmemory) || (reason == reason_lowmemory_blocking) || (reason == reason_induced_compacting) || (reason == reason_lowmemory_host) || (reason == reason_lowmemory_host_blocking)); } inline BOOL is_induced_blocking (gc_reason reason) { return ((reason == reason_induced) || (reason == reason_lowmemory_blocking) || (reason == reason_induced_compacting) || (reason == reason_lowmemory_host_blocking)); } gc_oh_num gen_to_oh(int gen) { switch (gen) { case soh_gen0: return gc_oh_num::soh; case soh_gen1: return gc_oh_num::soh; case soh_gen2: return gc_oh_num::soh; case loh_generation: return gc_oh_num::loh; case poh_generation: return gc_oh_num::poh; default: return gc_oh_num::none; } } uint64_t qpf; double qpf_ms; double qpf_us; uint64_t GetHighPrecisionTimeStamp() { int64_t ts = GCToOSInterface::QueryPerformanceCounter(); return (uint64_t)((double)ts * qpf_us); } uint64_t RawGetHighPrecisionTimeStamp() { return (uint64_t)GCToOSInterface::QueryPerformanceCounter(); } #ifdef BGC_SERVO_TUNING bool gc_heap::bgc_tuning::enable_fl_tuning = false; uint32_t gc_heap::bgc_tuning::memory_load_goal = 0; uint32_t gc_heap::bgc_tuning::memory_load_goal_slack = 0; uint64_t gc_heap::bgc_tuning::available_memory_goal = 0; bool gc_heap::bgc_tuning::panic_activated_p = false; double gc_heap::bgc_tuning::accu_error_panic = 0.0; double gc_heap::bgc_tuning::above_goal_kp = 0.0; double gc_heap::bgc_tuning::above_goal_ki = 0.0; bool gc_heap::bgc_tuning::enable_kd = false; bool gc_heap::bgc_tuning::enable_ki = false; bool gc_heap::bgc_tuning::enable_smooth = false; bool gc_heap::bgc_tuning::enable_tbh = false; bool gc_heap::bgc_tuning::enable_ff = false; bool 
gc_heap::bgc_tuning::enable_gradual_d = false; double gc_heap::bgc_tuning::above_goal_kd = 0.0; double gc_heap::bgc_tuning::above_goal_ff = 0.0; double gc_heap::bgc_tuning::num_gen1s_smooth_factor = 0.0; double gc_heap::bgc_tuning::ml_kp = 0.0; double gc_heap::bgc_tuning::ml_ki = 0.0; double gc_heap::bgc_tuning::accu_error = 0.0; bool gc_heap::bgc_tuning::fl_tuning_triggered = false; size_t gc_heap::bgc_tuning::num_bgcs_since_tuning_trigger = 0; bool gc_heap::bgc_tuning::next_bgc_p = false; size_t gc_heap::bgc_tuning::gen1_index_last_bgc_end; size_t gc_heap::bgc_tuning::gen1_index_last_bgc_start; size_t gc_heap::bgc_tuning::gen1_index_last_bgc_sweep; size_t gc_heap::bgc_tuning::actual_num_gen1s_to_trigger; gc_heap::bgc_tuning::tuning_calculation gc_heap::bgc_tuning::gen_calc[2]; gc_heap::bgc_tuning::tuning_stats gc_heap::bgc_tuning::gen_stats[2]; gc_heap::bgc_tuning::bgc_size_data gc_heap::bgc_tuning::current_bgc_end_data[2]; size_t gc_heap::bgc_tuning::last_stepping_bgc_count = 0; uint32_t gc_heap::bgc_tuning::last_stepping_mem_load = 0; uint32_t gc_heap::bgc_tuning::stepping_interval = 0; bool gc_heap::bgc_tuning::use_stepping_trigger_p = true; double gc_heap::bgc_tuning::gen2_ratio_correction = 0.0; double gc_heap::bgc_tuning::ratio_correction_step = 0.0; int gc_heap::saved_bgc_tuning_reason = -1; #endif //BGC_SERVO_TUNING inline size_t round_up_power2 (size_t size) { // Get the 0-based index of the most-significant bit in size-1. // If the call failed (because size-1 is zero), size must be 1, // so return 1 (because 1 rounds up to itself). DWORD highest_set_bit_index; if (0 == #ifdef HOST_64BIT BitScanReverse64( #else BitScanReverse( #endif &highest_set_bit_index, size - 1)) { return 1; } // The size == 0 case (which would have overflowed to SIZE_MAX when decremented) // is handled below by relying on the fact that highest_set_bit_index is the maximum value // (31 or 63, depending on sizeof(size_t)) and left-shifting a value >= 2 by that // number of bits shifts in zeros from the right, resulting in an output of zero. return static_cast<size_t>(2) << highest_set_bit_index; } inline size_t round_down_power2 (size_t size) { // Get the 0-based index of the most-significant bit in size. // If the call failed, size must be zero so return zero. DWORD highest_set_bit_index; if (0 == #ifdef HOST_64BIT BitScanReverse64( #else BitScanReverse( #endif &highest_set_bit_index, size)) { return 0; } // Left-shift 1 by highest_set_bit_index to get back a value containing only // the most-significant set bit of size, i.e. size rounded down // to the next power-of-two value. return static_cast<size_t>(1) << highest_set_bit_index; } // Get the 0-based index of the most-significant bit in the value. // Returns -1 if the input value is zero (i.e. has no set bits). inline int index_of_highest_set_bit (size_t value) { // Get the 0-based index of the most-significant bit in the value. // If the call failed (because value is zero), return -1. DWORD highest_set_bit_index; return (0 == #ifdef HOST_64BIT BitScanReverse64( #else BitScanReverse( #endif &highest_set_bit_index, value)) ? -1 : static_cast<int>(highest_set_bit_index); } inline int relative_index_power2_plug (size_t power2) { int index = index_of_highest_set_bit (power2); assert (index <= MAX_INDEX_POWER2); return ((index < MIN_INDEX_POWER2) ? 
0 : (index - MIN_INDEX_POWER2)); } inline int relative_index_power2_free_space (size_t power2) { int index = index_of_highest_set_bit (power2); assert (index <= MAX_INDEX_POWER2); return ((index < MIN_INDEX_POWER2) ? -1 : (index - MIN_INDEX_POWER2)); } #ifdef BACKGROUND_GC uint32_t bgc_alloc_spin_count = 140; uint32_t bgc_alloc_spin_count_loh = 16; uint32_t bgc_alloc_spin = 2; inline void c_write (uint32_t& place, uint32_t value) { Interlocked::Exchange (&place, value); } // If every heap's gen2 or gen3 size is less than this threshold we will do a blocking GC. const size_t bgc_min_per_heap = 4*1024*1024; int gc_heap::gchist_index = 0; gc_mechanisms_store gc_heap::gchist[max_history_count]; #ifndef MULTIPLE_HEAPS VOLATILE(bgc_state) gc_heap::current_bgc_state = bgc_not_in_process; int gc_heap::gchist_index_per_heap = 0; gc_heap::gc_history gc_heap::gchist_per_heap[max_history_count]; #endif //MULTIPLE_HEAPS #endif //BACKGROUND_GC void gc_heap::add_to_history_per_heap() { #if defined(GC_HISTORY) && defined(BACKGROUND_GC) gc_history* current_hist = &gchist_per_heap[gchist_index_per_heap]; current_hist->gc_index = settings.gc_index; current_hist->current_bgc_state = current_bgc_state; size_t elapsed = dd_gc_elapsed_time (dynamic_data_of (0)); current_hist->gc_time_ms = (uint32_t)(elapsed / 1000); current_hist->gc_efficiency = (elapsed ? (total_promoted_bytes / elapsed) : total_promoted_bytes); #ifndef USE_REGIONS current_hist->eph_low = generation_allocation_start (generation_of (max_generation - 1)); current_hist->gen0_start = generation_allocation_start (generation_of (0)); current_hist->eph_high = heap_segment_allocated (ephemeral_heap_segment); #endif //!USE_REGIONS #ifdef BACKGROUND_GC current_hist->bgc_lowest = background_saved_lowest_address; current_hist->bgc_highest = background_saved_highest_address; #endif //BACKGROUND_GC current_hist->fgc_lowest = lowest_address; current_hist->fgc_highest = highest_address; current_hist->g_lowest = g_gc_lowest_address; current_hist->g_highest = g_gc_highest_address; gchist_index_per_heap++; if (gchist_index_per_heap == max_history_count) { gchist_index_per_heap = 0; } #endif //GC_HISTORY && BACKGROUND_GC } void gc_heap::add_to_history() { #if defined(GC_HISTORY) && defined(BACKGROUND_GC) gc_mechanisms_store* current_settings = &gchist[gchist_index]; current_settings->store (&settings); gchist_index++; if (gchist_index == max_history_count) { gchist_index = 0; } #endif //GC_HISTORY && BACKGROUND_GC } #ifdef TRACE_GC BOOL gc_log_on = TRUE; FILE* gc_log = NULL; size_t gc_log_file_size = 0; size_t gc_buffer_index = 0; size_t max_gc_buffers = 0; static CLRCriticalSection gc_log_lock; // we keep this much in a buffer and only flush when the buffer is full #define gc_log_buffer_size (1024*1024) uint8_t* gc_log_buffer = 0; size_t gc_log_buffer_offset = 0; void log_va_msg(const char *fmt, va_list args) { gc_log_lock.Enter(); const int BUFFERSIZE = 4096; static char rgchBuffer[BUFFERSIZE]; char * pBuffer = &rgchBuffer[0]; pBuffer[0] = '\n'; int buffer_start = 1; int pid_len = sprintf_s (&pBuffer[buffer_start], BUFFERSIZE - buffer_start, "[%5d]", (uint32_t)GCToOSInterface::GetCurrentThreadIdForLogging()); buffer_start += pid_len; memset(&pBuffer[buffer_start], '-', BUFFERSIZE - buffer_start); int msg_len = _vsnprintf_s (&pBuffer[buffer_start], BUFFERSIZE - buffer_start, _TRUNCATE, fmt, args); if (msg_len == -1) { msg_len = BUFFERSIZE - buffer_start; } msg_len += buffer_start; if ((gc_log_buffer_offset + msg_len) > (gc_log_buffer_size - 12)) { char 
index_str[8]; memset (index_str, '-', 8); sprintf_s (index_str, ARRAY_SIZE(index_str), "%d", (int)gc_buffer_index); gc_log_buffer[gc_log_buffer_offset] = '\n'; memcpy (gc_log_buffer + (gc_log_buffer_offset + 1), index_str, 8); gc_buffer_index++; if (gc_buffer_index > max_gc_buffers) { fseek (gc_log, 0, SEEK_SET); gc_buffer_index = 0; } fwrite(gc_log_buffer, gc_log_buffer_size, 1, gc_log); fflush(gc_log); memset (gc_log_buffer, '*', gc_log_buffer_size); gc_log_buffer_offset = 0; } memcpy (gc_log_buffer + gc_log_buffer_offset, pBuffer, msg_len); gc_log_buffer_offset += msg_len; gc_log_lock.Leave(); } void GCLog (const char *fmt, ... ) { if (gc_log_on && (gc_log != NULL)) { va_list args; va_start(args, fmt); log_va_msg (fmt, args); va_end(args); } } #endif // TRACE_GC #ifdef GC_CONFIG_DRIVEN BOOL gc_config_log_on = FALSE; FILE* gc_config_log = NULL; // we keep this much in a buffer and only flush when the buffer is full #define gc_config_log_buffer_size (1*1024) // TEMP uint8_t* gc_config_log_buffer = 0; size_t gc_config_log_buffer_offset = 0; // For config since we log so little we keep the whole history. Also it's only // ever logged by one thread so no need to synchronize. void log_va_msg_config(const char *fmt, va_list args) { const int BUFFERSIZE = 256; static char rgchBuffer[BUFFERSIZE]; char * pBuffer = &rgchBuffer[0]; pBuffer[0] = '\n'; int buffer_start = 1; int msg_len = _vsnprintf_s (&pBuffer[buffer_start], BUFFERSIZE - buffer_start, _TRUNCATE, fmt, args ); assert (msg_len != -1); msg_len += buffer_start; if ((gc_config_log_buffer_offset + msg_len) > gc_config_log_buffer_size) { fwrite(gc_config_log_buffer, gc_config_log_buffer_offset, 1, gc_config_log); fflush(gc_config_log); gc_config_log_buffer_offset = 0; } memcpy (gc_config_log_buffer + gc_config_log_buffer_offset, pBuffer, msg_len); gc_config_log_buffer_offset += msg_len; } void GCLogConfig (const char *fmt, ... ) { if (gc_config_log_on && (gc_config_log != NULL)) { va_list args; va_start( args, fmt ); log_va_msg_config (fmt, args); } } #endif // GC_CONFIG_DRIVEN void GCHeap::Shutdown() { #if defined(TRACE_GC) && !defined(BUILD_AS_STANDALONE) if (gc_log_on && (gc_log != NULL)) { fwrite(gc_log_buffer, gc_log_buffer_offset, 1, gc_log); fflush(gc_log); fclose(gc_log); gc_log_buffer_offset = 0; } #endif //TRACE_GC && !BUILD_AS_STANDALONE } #ifdef SYNCHRONIZATION_STATS // Number of GCs have we done since we last logged. static unsigned int gc_count_during_log; // In ms. This is how often we print out stats. static const unsigned int log_interval = 5000; // Time (in ms) when we start a new log interval. static unsigned int log_start_tick; static unsigned int gc_lock_contended; static int64_t log_start_hires; // Cycles accumulated in SuspendEE during log_interval. static uint64_t suspend_ee_during_log; // Cycles accumulated in RestartEE during log_interval. 
static uint64_t restart_ee_during_log; static uint64_t gc_during_log; #endif //SYNCHRONIZATION_STATS void init_sync_log_stats() { #ifdef SYNCHRONIZATION_STATS if (gc_count_during_log == 0) { gc_heap::init_sync_stats(); suspend_ee_during_log = 0; restart_ee_during_log = 0; gc_during_log = 0; gc_lock_contended = 0; log_start_tick = GCToOSInterface::GetLowPrecisionTimeStamp(); log_start_hires = GCToOSInterface::QueryPerformanceCounter(); } gc_count_during_log++; #endif //SYNCHRONIZATION_STATS } void process_sync_log_stats() { #ifdef SYNCHRONIZATION_STATS unsigned int log_elapsed = GCToOSInterface::GetLowPrecisionTimeStamp() - log_start_tick; if (log_elapsed > log_interval) { uint64_t total = GCToOSInterface::QueryPerformanceCounter() - log_start_hires; // Print out the cycles we spent on average in each suspend and restart. printf("\n_________________________________________________________________________________\n" "Past %d(s): #%3d GCs; Total gc_lock contended: %8u; GC: %12u\n" "SuspendEE: %8u; RestartEE: %8u GC %.3f%%\n", log_interval / 1000, gc_count_during_log, gc_lock_contended, (unsigned int)(gc_during_log / gc_count_during_log), (unsigned int)(suspend_ee_during_log / gc_count_during_log), (unsigned int)(restart_ee_during_log / gc_count_during_log), (double)(100.0f * gc_during_log / total)); gc_heap::print_sync_stats(gc_count_during_log); gc_count_during_log = 0; } #endif //SYNCHRONIZATION_STATS } #ifdef MULTIPLE_HEAPS uint32_t g_num_active_processors = 0; // Note that when a join is no longer used we still keep the values here because // tooling already recognized them as having the meaning they were assigned originally. // It doesn't break tooling if we stop using them but does if we assign a new meaning // to them. enum gc_join_stage { gc_join_init_cpu_mapping = 0, gc_join_done = 1, gc_join_generation_determined = 2, gc_join_begin_mark_phase = 3, gc_join_scan_dependent_handles = 4, gc_join_rescan_dependent_handles = 5, gc_join_scan_sizedref_done = 6, gc_join_null_dead_short_weak = 7, gc_join_scan_finalization = 8, gc_join_null_dead_long_weak = 9, gc_join_null_dead_syncblk = 10, gc_join_decide_on_compaction = 11, gc_join_rearrange_segs_compaction = 12, gc_join_adjust_handle_age_compact = 13, gc_join_adjust_handle_age_sweep = 14, gc_join_begin_relocate_phase = 15, gc_join_relocate_phase_done = 16, gc_join_verify_objects_done = 17, gc_join_start_bgc = 18, gc_join_restart_ee = 19, gc_join_concurrent_overflow = 20, gc_join_suspend_ee = 21, gc_join_bgc_after_ephemeral = 22, gc_join_allow_fgc = 23, gc_join_bgc_sweep = 24, gc_join_suspend_ee_verify = 25, gc_join_restart_ee_verify = 26, gc_join_set_state_free = 27, gc_r_join_update_card_bundle = 28, gc_join_after_absorb = 29, gc_join_verify_copy_table = 30, gc_join_after_reset = 31, gc_join_after_ephemeral_sweep = 32, gc_join_after_profiler_heap_walk = 33, gc_join_minimal_gc = 34, gc_join_after_commit_soh_no_gc = 35, gc_join_expand_loh_no_gc = 36, gc_join_final_no_gc = 37, // No longer in use but do not remove, see comments for this enum. 
gc_join_disable_software_write_watch = 38, gc_join_max = 39 }; enum gc_join_flavor { join_flavor_server_gc = 0, join_flavor_bgc = 1 }; #define first_thread_arrived 2 #pragma warning(push) #pragma warning(disable:4324) // don't complain if DECLSPEC_ALIGN actually pads struct DECLSPEC_ALIGN(HS_CACHE_LINE_SIZE) join_structure { // Shared non volatile keep on separate line to prevent eviction int n_threads; // Keep polling/wait structures on separate line write once per join DECLSPEC_ALIGN(HS_CACHE_LINE_SIZE) GCEvent joined_event[3]; // the last event in the array is only used for first_thread_arrived. Volatile<int> lock_color; VOLATILE(BOOL) wait_done; VOLATILE(BOOL) joined_p; // Keep volatile counted locks on separate cache line write many per join DECLSPEC_ALIGN(HS_CACHE_LINE_SIZE) VOLATILE(int) join_lock; VOLATILE(int) r_join_lock; }; #pragma warning(pop) enum join_type { type_last_join = 0, type_join = 1, type_restart = 2, type_first_r_join = 3, type_r_join = 4 }; enum join_time { time_start = 0, time_end = 1 }; enum join_heap_index { join_heap_restart = 100, join_heap_r_restart = 200 }; class t_join { join_structure join_struct; int id; gc_join_flavor flavor; #ifdef JOIN_STATS uint64_t start[MAX_SUPPORTED_CPUS], end[MAX_SUPPORTED_CPUS], start_seq; // remember join id and last thread to arrive so restart can use these int thd; // we want to print statistics every 10 seconds - this is to remember the start of the 10 sec interval uint32_t start_tick; // counters for joins, in 1000's of clock cycles uint64_t elapsed_total[gc_join_max], wake_total[gc_join_max], seq_loss_total[gc_join_max], par_loss_total[gc_join_max], in_join_total[gc_join_max]; #endif //JOIN_STATS public: BOOL init (int n_th, gc_join_flavor f) { dprintf (JOIN_LOG, ("Initializing join structure")); join_struct.n_threads = n_th; join_struct.lock_color = 0; for (int i = 0; i < 3; i++) { if (!join_struct.joined_event[i].IsValid()) { join_struct.joined_p = FALSE; dprintf (JOIN_LOG, ("Creating join event %d", i)); // TODO - changing this to a non OS event // because this is also used by BGC threads which are // managed threads and WaitEx does not allow you to wait // for an OS event on a managed thread. // But we are not sure if this plays well in the hosting // environment. 
//join_struct.joined_event[i].CreateOSManualEventNoThrow(FALSE); if (!join_struct.joined_event[i].CreateManualEventNoThrow(FALSE)) return FALSE; } } join_struct.join_lock = join_struct.n_threads; join_struct.r_join_lock = join_struct.n_threads; join_struct.wait_done = FALSE; flavor = f; #ifdef JOIN_STATS start_tick = GCToOSInterface::GetLowPrecisionTimeStamp(); #endif //JOIN_STATS return TRUE; } void destroy () { dprintf (JOIN_LOG, ("Destroying join structure")); for (int i = 0; i < 3; i++) { if (join_struct.joined_event[i].IsValid()) join_struct.joined_event[i].CloseEvent(); } } inline void fire_event (int heap, join_time time, join_type type, int join_id) { FIRE_EVENT(GCJoin_V2, heap, time, type, join_id); } void join (gc_heap* gch, int join_id) { #ifdef JOIN_STATS // parallel execution ends here end[gch->heap_number] = get_ts(); #endif //JOIN_STATS assert (!join_struct.joined_p); int color = join_struct.lock_color.LoadWithoutBarrier(); if (Interlocked::Decrement(&join_struct.join_lock) != 0) { dprintf (JOIN_LOG, ("join%d(%d): Join() Waiting...join_lock is now %d", flavor, join_id, (int32_t)(join_struct.join_lock))); fire_event (gch->heap_number, time_start, type_join, join_id); //busy wait around the color if (color == join_struct.lock_color.LoadWithoutBarrier()) { respin: int spin_count = 128 * yp_spin_count_unit; for (int j = 0; j < spin_count; j++) { if (color != join_struct.lock_color.LoadWithoutBarrier()) { break; } YieldProcessor(); // indicate to the processor that we are spinning } // we've spun, and if color still hasn't changed, fall into hard wait if (color == join_struct.lock_color.LoadWithoutBarrier()) { dprintf (JOIN_LOG, ("join%d(%d): Join() hard wait on reset event %d, join_lock is now %d", flavor, join_id, color, (int32_t)(join_struct.join_lock))); uint32_t dwJoinWait = join_struct.joined_event[color].Wait(INFINITE, FALSE); if (dwJoinWait != WAIT_OBJECT_0) { STRESS_LOG1 (LF_GC, LL_FATALERROR, "joined event wait failed with code: %Ix", dwJoinWait); FATAL_GC_ERROR (); } } // avoid race due to the thread about to reset the event (occasionally) being preempted before ResetEvent() if (color == join_struct.lock_color.LoadWithoutBarrier()) { goto respin; } dprintf (JOIN_LOG, ("join%d(%d): Join() done, join_lock is %d", flavor, join_id, (int32_t)(join_struct.join_lock))); } fire_event (gch->heap_number, time_end, type_join, join_id); #ifdef JOIN_STATS // parallel execution starts here start[gch->heap_number] = get_ts(); Interlocked::ExchangeAdd(&in_join_total[join_id], (start[gch->heap_number] - end[gch->heap_number])); #endif //JOIN_STATS } else { fire_event (gch->heap_number, time_start, type_last_join, join_id); join_struct.joined_p = TRUE; dprintf (JOIN_LOG, ("join%d(%d): Last thread to complete the join, setting id", flavor, join_id)); join_struct.joined_event[!color].Reset(); id = join_id; #ifdef JOIN_STATS // remember the join id, the last thread arriving, the start of the sequential phase, // and keep track of the cycles spent waiting in the join thd = gch->heap_number; start_seq = get_ts(); Interlocked::ExchangeAdd(&in_join_total[join_id], (start_seq - end[gch->heap_number])); #endif //JOIN_STATS } } // Reverse join - first thread gets here does the work; other threads will only proceed // after the work is done. // Note that you cannot call this twice in a row on the same thread. Plus there's no // need to call it twice in row - you should just merge the work. 
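// Illustrative usage sketch (not part of the original file; the stage name is just one of the
// gc_r_join_* values defined in the enum above):
//
//   if (gc_t_join.r_join (this, gc_r_join_update_card_bundle))
//   {
//       // ... first thread to arrive performs the shared, single-threaded piece of work ...
//       gc_t_join.r_restart();   // releases the other heap threads still waiting inside r_join
//   }
//   // every heap thread continues from here once the work is done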
BOOL r_join (gc_heap* gch, int join_id) { if (join_struct.n_threads == 1) { return TRUE; } if (Interlocked::CompareExchange(&join_struct.r_join_lock, 0, join_struct.n_threads) == 0) { fire_event (gch->heap_number, time_start, type_join, join_id); dprintf (JOIN_LOG, ("r_join() Waiting...")); //busy wait around the color respin: int spin_count = 256 * yp_spin_count_unit; for (int j = 0; j < spin_count; j++) { if (join_struct.wait_done) { break; } YieldProcessor(); // indicate to the processor that we are spinning } // we've spun, and if color still hasn't changed, fall into hard wait if (!join_struct.wait_done) { dprintf (JOIN_LOG, ("Join() hard wait on reset event %d", first_thread_arrived)); uint32_t dwJoinWait = join_struct.joined_event[first_thread_arrived].Wait(INFINITE, FALSE); if (dwJoinWait != WAIT_OBJECT_0) { STRESS_LOG1 (LF_GC, LL_FATALERROR, "joined event wait failed with code: %Ix", dwJoinWait); FATAL_GC_ERROR (); } } // avoid race due to the thread about to reset the event (occasionally) being preempted before ResetEvent() if (!join_struct.wait_done) { goto respin; } dprintf (JOIN_LOG, ("r_join() done")); fire_event (gch->heap_number, time_end, type_join, join_id); return FALSE; } else { fire_event (gch->heap_number, time_start, type_first_r_join, join_id); return TRUE; } } #ifdef JOIN_STATS uint64_t get_ts() { return GCToOSInterface::QueryPerformanceCounter(); } void start_ts (gc_heap* gch) { // parallel execution ends here start[gch->heap_number] = get_ts(); } #endif //JOIN_STATS void restart() { #ifdef JOIN_STATS uint64_t elapsed_seq = get_ts() - start_seq; uint64_t max = 0, sum = 0, wake = 0; uint64_t min_ts = start[0]; for (int i = 1; i < join_struct.n_threads; i++) { if(min_ts > start[i]) min_ts = start[i]; } for (int i = 0; i < join_struct.n_threads; i++) { uint64_t wake_delay = start[i] - min_ts; uint64_t elapsed = end[i] - start[i]; if (max < elapsed) max = elapsed; sum += elapsed; wake += wake_delay; } uint64_t seq_loss = (join_struct.n_threads - 1)*elapsed_seq; uint64_t par_loss = join_struct.n_threads*max - sum; double efficiency = 0.0; if (max > 0) efficiency = sum*100.0/(join_struct.n_threads*max); const double ts_scale = 1e-6; // enable this printf to get statistics on each individual join as it occurs //printf("join #%3d seq_loss = %5g par_loss = %5g efficiency = %3.0f%%\n", join_id, ts_scale*seq_loss, ts_scale*par_loss, efficiency); elapsed_total[id] += sum; wake_total[id] += wake; seq_loss_total[id] += seq_loss; par_loss_total[id] += par_loss; // every 10 seconds, print a summary of the time spent in each type of join if (GCToOSInterface::GetLowPrecisionTimeStamp() - start_tick > 10*1000) { printf("**** summary *****\n"); for (int i = 0; i < 16; i++) { printf("join #%3d elapsed_total = %8g wake_loss = %8g seq_loss = %8g par_loss = %8g in_join_total = %8g\n", i, ts_scale*elapsed_total[i], ts_scale*wake_total[i], ts_scale*seq_loss_total[i], ts_scale*par_loss_total[i], ts_scale*in_join_total[i]); elapsed_total[i] = wake_total[i] = seq_loss_total[i] = par_loss_total[i] = in_join_total[i] = 0; } start_tick = GCToOSInterface::GetLowPrecisionTimeStamp(); } #endif //JOIN_STATS fire_event (join_heap_restart, time_start, type_restart, -1); assert (join_struct.joined_p); join_struct.joined_p = FALSE; join_struct.join_lock = join_struct.n_threads; dprintf (JOIN_LOG, ("join%d(%d): Restarting from join: join_lock is %d", flavor, id, (int32_t)(join_struct.join_lock))); int color = join_struct.lock_color.LoadWithoutBarrier(); join_struct.lock_color = !color; 
join_struct.joined_event[color].Set(); fire_event (join_heap_restart, time_end, type_restart, -1); #ifdef JOIN_STATS start[thd] = get_ts(); #endif //JOIN_STATS } BOOL joined() { dprintf (JOIN_LOG, ("join%d(%d): joined, join_lock is %d", flavor, id, (int32_t)(join_struct.join_lock))); return join_struct.joined_p; } void r_restart() { if (join_struct.n_threads != 1) { fire_event (join_heap_r_restart, time_start, type_restart, -1); join_struct.wait_done = TRUE; join_struct.joined_event[first_thread_arrived].Set(); fire_event (join_heap_r_restart, time_end, type_restart, -1); } } void r_init() { if (join_struct.n_threads != 1) { join_struct.r_join_lock = join_struct.n_threads; join_struct.wait_done = FALSE; join_struct.joined_event[first_thread_arrived].Reset(); } } }; t_join gc_t_join; #ifdef BACKGROUND_GC t_join bgc_t_join; #endif //BACKGROUND_GC #endif //MULTIPLE_HEAPS #define spin_and_switch(count_to_spin, expr) \ { \ for (int j = 0; j < count_to_spin; j++) \ { \ if (expr) \ { \ break;\ } \ YieldProcessor(); \ } \ if (!(expr)) \ { \ GCToOSInterface::YieldThread(0); \ } \ } #ifdef BACKGROUND_GC #define max_pending_allocs 64 class exclusive_sync { VOLATILE(uint8_t*) rwp_object; VOLATILE(int32_t) needs_checking; int spin_count; uint8_t cache_separator[HS_CACHE_LINE_SIZE - sizeof (int) - sizeof (int32_t)]; // TODO - perhaps each object should be on its own cache line... VOLATILE(uint8_t*) alloc_objects[max_pending_allocs]; int find_free_index () { for (int i = 0; i < max_pending_allocs; i++) { if (alloc_objects [i] == (uint8_t*)0) { return i; } } return -1; } public: void init() { spin_count = 32 * (g_num_processors - 1); rwp_object = 0; needs_checking = 0; for (int i = 0; i < max_pending_allocs; i++) { alloc_objects [i] = (uint8_t*)0; } } void check() { for (int i = 0; i < max_pending_allocs; i++) { if (alloc_objects [i] != (uint8_t*)0) { FATAL_GC_ERROR(); } } } void bgc_mark_set (uint8_t* obj) { dprintf (3, ("cm: probing %Ix", obj)); retry: if (Interlocked::CompareExchange(&needs_checking, 1, 0) == 0) { // If we spend too much time spending all the allocs, // consider adding a high water mark and scan up // to that; we'll need to interlock in done when // we update the high watermark. 
for (int i = 0; i < max_pending_allocs; i++) { if (obj == alloc_objects[i]) { needs_checking = 0; dprintf (3, ("cm: will spin")); spin_and_switch (spin_count, (obj != alloc_objects[i])); goto retry; } } rwp_object = obj; needs_checking = 0; dprintf (3, ("cm: set %Ix", obj)); return; } else { spin_and_switch (spin_count, (needs_checking == 0)); goto retry; } } int uoh_alloc_set (uint8_t* obj) { if (!gc_heap::cm_in_progress) { return -1; } retry: dprintf (3, ("uoh alloc: probing %Ix", obj)); if (Interlocked::CompareExchange(&needs_checking, 1, 0) == 0) { if (obj == rwp_object) { needs_checking = 0; spin_and_switch (spin_count, (obj != rwp_object)); goto retry; } else { int cookie = find_free_index(); if (cookie != -1) { alloc_objects[cookie] = obj; needs_checking = 0; //if (cookie >= 4) //{ // GCToOSInterface::DebugBreak(); //} dprintf (3, ("uoh alloc: set %Ix at %d", obj, cookie)); return cookie; } else { needs_checking = 0; dprintf (3, ("uoh alloc: setting %Ix will spin to acquire a free index", obj)); spin_and_switch (spin_count, (find_free_index () != -1)); goto retry; } } } else { dprintf (3, ("uoh alloc: will spin on checking %Ix", obj)); spin_and_switch (spin_count, (needs_checking == 0)); goto retry; } } void bgc_mark_done () { dprintf (3, ("cm: release lock on %Ix", (uint8_t *)rwp_object)); rwp_object = 0; } void uoh_alloc_done_with_index (int index) { dprintf (3, ("uoh alloc: release lock on %Ix based on %d", (uint8_t *)alloc_objects[index], index)); assert ((index >= 0) && (index < max_pending_allocs)); alloc_objects[index] = (uint8_t*)0; } void uoh_alloc_done (uint8_t* obj) { if (!gc_heap::cm_in_progress) { return; } for (int i = 0; i < max_pending_allocs; i++) { if (alloc_objects [i] == obj) { uoh_alloc_done_with_index(i); return; } } } }; #endif //BACKGROUND_GC void reset_memory (uint8_t* o, size_t sizeo); #ifdef WRITE_WATCH #ifndef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP static bool virtual_alloc_hardware_write_watch = false; #endif // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP static bool hardware_write_watch_capability = false; void hardware_write_watch_api_supported() { if (GCToOSInterface::SupportsWriteWatch()) { hardware_write_watch_capability = true; dprintf (2, ("WriteWatch supported")); } else { dprintf (2,("WriteWatch not supported")); } } inline bool can_use_hardware_write_watch() { return hardware_write_watch_capability; } inline bool can_use_write_watch_for_gc_heap() { #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP return true; #else // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP return can_use_hardware_write_watch(); #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP } inline bool can_use_write_watch_for_card_table() { #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES return true; #else return can_use_hardware_write_watch(); #endif } #else #define mem_reserve (MEM_RESERVE) #endif //WRITE_WATCH //check if the low memory notification is supported void WaitLongerNoInstru (int i) { // every 8th attempt: bool bToggleGC = GCToEEInterface::EnablePreemptiveGC(); // if we're waiting for gc to finish, we should block immediately if (g_fSuspensionPending == 0) { if (g_num_processors > 1) { YieldProcessor(); // indicate to the processor that we are spinning if (i & 0x01f) GCToOSInterface::YieldThread (0); else GCToOSInterface::Sleep (5); } else GCToOSInterface::Sleep (5); } // If CLR is hosted, a thread may reach here while it is in preemptive GC mode, // or it has no Thread object, in order to force a task to yield, or to triger a GC. 
// It is important that the thread is going to wait for GC. Otherwise the thread // is in a tight loop. If the thread has high priority, the perf is going to be very BAD. if (bToggleGC) { #ifdef _DEBUG // In debug builds, all enter_spin_lock operations go through this code. If a GC has // started, it is important to block until the GC thread calls set_gc_done (since it is // guaranteed to have cleared g_TrapReturningThreads by this point). This avoids livelock // conditions which can otherwise occur if threads are allowed to spin in this function // (and therefore starve the GC thread) between the point when the GC thread sets the // WaitForGC event and the point when the GC thread clears g_TrapReturningThreads. if (gc_heap::gc_started) { gc_heap::wait_for_gc_done(); } #endif // _DEBUG GCToEEInterface::DisablePreemptiveGC(); } else if (g_fSuspensionPending > 0) { g_theGCHeap->WaitUntilGCComplete(); } } inline static void safe_switch_to_thread() { bool cooperative_mode = gc_heap::enable_preemptive(); GCToOSInterface::YieldThread(0); gc_heap::disable_preemptive(cooperative_mode); } // // We need the following methods to have volatile arguments, so that they can accept // raw pointers in addition to the results of the & operator on Volatile<T>. // inline static void enter_spin_lock_noinstru (RAW_KEYWORD(volatile) int32_t* lock) { retry: if (Interlocked::CompareExchange(lock, 0, -1) >= 0) { unsigned int i = 0; while (VolatileLoad(lock) >= 0) { if ((++i & 7) && !IsGCInProgress()) { if (g_num_processors > 1) { #ifndef MULTIPLE_HEAPS int spin_count = 32 * yp_spin_count_unit; #else //!MULTIPLE_HEAPS int spin_count = yp_spin_count_unit; #endif //!MULTIPLE_HEAPS for (int j = 0; j < spin_count; j++) { if (VolatileLoad(lock) < 0 || IsGCInProgress()) break; YieldProcessor(); // indicate to the processor that we are spinning } if (VolatileLoad(lock) >= 0 && !IsGCInProgress()) { safe_switch_to_thread(); } } else { safe_switch_to_thread(); } } else { WaitLongerNoInstru(i); } } goto retry; } } inline static BOOL try_enter_spin_lock_noinstru(RAW_KEYWORD(volatile) int32_t* lock) { return (Interlocked::CompareExchange(&*lock, 0, -1) < 0); } inline static void leave_spin_lock_noinstru (RAW_KEYWORD(volatile) int32_t* lock) { VolatileStore<int32_t>((int32_t*)lock, -1); } #ifdef _DEBUG inline static void enter_spin_lock(GCSpinLock *pSpinLock) { enter_spin_lock_noinstru(&pSpinLock->lock); assert (pSpinLock->holding_thread == (Thread*)-1); pSpinLock->holding_thread = GCToEEInterface::GetThread(); } inline static BOOL try_enter_spin_lock(GCSpinLock *pSpinLock) { BOOL ret = try_enter_spin_lock_noinstru(&pSpinLock->lock); if (ret) pSpinLock->holding_thread = GCToEEInterface::GetThread(); return ret; } inline static void leave_spin_lock(GCSpinLock *pSpinLock) { bool gc_thread_p = GCToEEInterface::WasCurrentThreadCreatedByGC(); pSpinLock->released_by_gc_p = gc_thread_p; pSpinLock->holding_thread = (Thread*) -1; if (pSpinLock->lock != -1) leave_spin_lock_noinstru(&pSpinLock->lock); } #define ASSERT_HOLDING_SPIN_LOCK(pSpinLock) \ _ASSERTE((pSpinLock)->holding_thread == GCToEEInterface::GetThread()); #define ASSERT_NOT_HOLDING_SPIN_LOCK(pSpinLock) \ _ASSERTE((pSpinLock)->holding_thread != GCToEEInterface::GetThread()); #else //_DEBUG //In the concurrent version, the Enable/DisablePreemptiveGC is optional because //the gc thread call WaitLonger. 
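// (Illustrative sketch, not part of the GC: the spin / yield / sleep back-off used by
// WaitLonger and enter_spin_lock below, reduced to a plain std::atomic flag.  The
// names demo_lock/demo_acquire are hypothetical and the block is compiled out.)
#if 0
#include <atomic>
#include <chrono>
#include <thread>

static std::atomic<int> demo_lock{ -1 };                // -1 == free, 0 == held

static void demo_acquire()
{
    for (;;)
    {
        int expected = -1;
        if (demo_lock.compare_exchange_strong (expected, 0))
            return;                                     // CAS from free to held succeeded

        unsigned i = 0;
        while (demo_lock.load (std::memory_order_relaxed) >= 0)
        {
            if (++i & 7)
                std::this_thread::yield();              // cheap back-off most of the time
            else
                std::this_thread::sleep_for (std::chrono::milliseconds (5)); // every 8th try, sleep
        }
        // observed free - go back and retry the CAS
    }
}
#endif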
void WaitLonger (int i #ifdef SYNCHRONIZATION_STATS , GCSpinLock* spin_lock #endif //SYNCHRONIZATION_STATS ) { #ifdef SYNCHRONIZATION_STATS (spin_lock->num_wait_longer)++; #endif //SYNCHRONIZATION_STATS // every 8th attempt: bool bToggleGC = GCToEEInterface::EnablePreemptiveGC(); assert (bToggleGC); // if we're waiting for gc to finish, we should block immediately if (!gc_heap::gc_started) { #ifdef SYNCHRONIZATION_STATS (spin_lock->num_switch_thread_w)++; #endif //SYNCHRONIZATION_STATS if (g_num_processors > 1) { YieldProcessor(); // indicate to the processor that we are spinning if (i & 0x01f) GCToOSInterface::YieldThread (0); else GCToOSInterface::Sleep (5); } else GCToOSInterface::Sleep (5); } // If CLR is hosted, a thread may reach here while it is in preemptive GC mode, // or it has no Thread object, in order to force a task to yield, or to triger a GC. // It is important that the thread is going to wait for GC. Otherwise the thread // is in a tight loop. If the thread has high priority, the perf is going to be very BAD. if (gc_heap::gc_started) { gc_heap::wait_for_gc_done(); } if (bToggleGC) { #ifdef SYNCHRONIZATION_STATS (spin_lock->num_disable_preemptive_w)++; #endif //SYNCHRONIZATION_STATS GCToEEInterface::DisablePreemptiveGC(); } } inline static void enter_spin_lock (GCSpinLock* spin_lock) { retry: if (Interlocked::CompareExchange(&spin_lock->lock, 0, -1) >= 0) { unsigned int i = 0; while (spin_lock->lock >= 0) { if ((++i & 7) && !gc_heap::gc_started) { if (g_num_processors > 1) { #ifndef MULTIPLE_HEAPS int spin_count = 32 * yp_spin_count_unit; #else //!MULTIPLE_HEAPS int spin_count = yp_spin_count_unit; #endif //!MULTIPLE_HEAPS for (int j = 0; j < spin_count; j++) { if (spin_lock->lock < 0 || gc_heap::gc_started) break; YieldProcessor(); // indicate to the processor that we are spinning } if (spin_lock->lock >= 0 && !gc_heap::gc_started) { #ifdef SYNCHRONIZATION_STATS (spin_lock->num_switch_thread)++; #endif //SYNCHRONIZATION_STATS bool cooperative_mode = gc_heap::enable_preemptive (); GCToOSInterface::YieldThread(0); gc_heap::disable_preemptive (cooperative_mode); } } else GCToOSInterface::YieldThread(0); } else { WaitLonger(i #ifdef SYNCHRONIZATION_STATS , spin_lock #endif //SYNCHRONIZATION_STATS ); } } goto retry; } } inline static BOOL try_enter_spin_lock(GCSpinLock* spin_lock) { return (Interlocked::CompareExchange(&spin_lock->lock, 0, -1) < 0); } inline static void leave_spin_lock (GCSpinLock * spin_lock) { spin_lock->lock = -1; } #define ASSERT_HOLDING_SPIN_LOCK(pSpinLock) #endif //_DEBUG bool gc_heap::enable_preemptive () { return GCToEEInterface::EnablePreemptiveGC(); } void gc_heap::disable_preemptive (bool restore_cooperative) { if (restore_cooperative) { GCToEEInterface::DisablePreemptiveGC(); } } typedef void ** PTR_PTR; inline void memclr ( uint8_t* mem, size_t size) { dprintf (3, ("MEMCLR: %Ix, %d", mem, size)); assert ((size & (sizeof(PTR_PTR)-1)) == 0); assert (sizeof(PTR_PTR) == DATA_ALIGNMENT); memset (mem, 0, size); } void memcopy (uint8_t* dmem, uint8_t* smem, size_t size) { const size_t sz4ptr = sizeof(PTR_PTR)*4; const size_t sz2ptr = sizeof(PTR_PTR)*2; const size_t sz1ptr = sizeof(PTR_PTR)*1; assert ((size & (sizeof (PTR_PTR)-1)) == 0); assert (sizeof(PTR_PTR) == DATA_ALIGNMENT); // copy in groups of four pointer sized things at a time if (size >= sz4ptr) { do { ((PTR_PTR)dmem)[0] = ((PTR_PTR)smem)[0]; ((PTR_PTR)dmem)[1] = ((PTR_PTR)smem)[1]; ((PTR_PTR)dmem)[2] = ((PTR_PTR)smem)[2]; ((PTR_PTR)dmem)[3] = ((PTR_PTR)smem)[3]; dmem += sz4ptr; smem += sz4ptr; 
} while ((size -= sz4ptr) >= sz4ptr); } // still two pointer sized things or more left to copy? if (size & sz2ptr) { ((PTR_PTR)dmem)[0] = ((PTR_PTR)smem)[0]; ((PTR_PTR)dmem)[1] = ((PTR_PTR)smem)[1]; dmem += sz2ptr; smem += sz2ptr; } // still one pointer sized thing left to copy? if (size & sz1ptr) { ((PTR_PTR)dmem)[0] = ((PTR_PTR)smem)[0]; } } inline ptrdiff_t round_down (ptrdiff_t add, int pitch) { return ((add / pitch) * pitch); } #if defined(FEATURE_STRUCTALIGN) && defined(RESPECT_LARGE_ALIGNMENT) // FEATURE_STRUCTALIGN allows the compiler to dictate the alignment, // i.e, if a larger alignment matters or is beneficial, the compiler // generated info tells us so. RESPECT_LARGE_ALIGNMENT is just the // converse - it's a heuristic for the GC to use a larger alignment. #error FEATURE_STRUCTALIGN should imply !RESPECT_LARGE_ALIGNMENT #endif #if defined(FEATURE_STRUCTALIGN) && defined(FEATURE_LOH_COMPACTION) #error FEATURE_STRUCTALIGN and FEATURE_LOH_COMPACTION are mutually exclusive #endif // Returns true if two pointers have the same large (double than normal) alignment. inline BOOL same_large_alignment_p (uint8_t* p1, uint8_t* p2) { #ifdef RESPECT_LARGE_ALIGNMENT const size_t LARGE_ALIGNMENT_MASK = 2 * DATA_ALIGNMENT - 1; return ((((size_t)p1 ^ (size_t)p2) & LARGE_ALIGNMENT_MASK) == 0); #else UNREFERENCED_PARAMETER(p1); UNREFERENCED_PARAMETER(p2); return TRUE; #endif // RESPECT_LARGE_ALIGNMENT } // Determines the padding size required to fix large alignment during relocation. inline size_t switch_alignment_size (BOOL already_padded_p) { #ifndef RESPECT_LARGE_ALIGNMENT assert (!"Should not be called"); #endif // RESPECT_LARGE_ALIGNMENT if (already_padded_p) return DATA_ALIGNMENT; else return Align (min_obj_size) | DATA_ALIGNMENT; } #ifdef FEATURE_STRUCTALIGN void set_node_aligninfo (uint8_t *node, int requiredAlignment, ptrdiff_t pad); void clear_node_aligninfo (uint8_t *node); #else // FEATURE_STRUCTALIGN #define node_realigned(node) (((plug_and_reloc*)(node))[-1].reloc & 1) void set_node_realigned (uint8_t* node); void clear_node_realigned(uint8_t* node); #endif // FEATURE_STRUCTALIGN inline size_t AlignQword (size_t nbytes) { #ifdef FEATURE_STRUCTALIGN // This function is used to align everything on the large object // heap to an 8-byte boundary, to reduce the number of unaligned // accesses to (say) arrays of doubles. With FEATURE_STRUCTALIGN, // the compiler dictates the optimal alignment instead of having // a heuristic in the GC. return Align (nbytes); #else // FEATURE_STRUCTALIGN return (nbytes + 7) & ~7; #endif // FEATURE_STRUCTALIGN } inline BOOL Aligned (size_t n) { return (n & ALIGNCONST) == 0; } #define OBJECT_ALIGNMENT_OFFSET (sizeof(MethodTable *)) #ifdef FEATURE_STRUCTALIGN #define MAX_STRUCTALIGN OS_PAGE_SIZE #else // FEATURE_STRUCTALIGN #define MAX_STRUCTALIGN 0 #endif // FEATURE_STRUCTALIGN #ifdef FEATURE_STRUCTALIGN inline ptrdiff_t AdjustmentForMinPadSize(ptrdiff_t pad, int requiredAlignment) { // The resulting alignpad must be either 0 or at least min_obj_size. // Note that by computing the following difference on unsigned types, // we can do the range check 0 < alignpad < min_obj_size with a // single conditional branch. 
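// (Worked example with illustrative values DATA_ALIGNMENT == 8 and
// Align (min_obj_size) == 24: pad == 0 wraps to a huge unsigned value and fails the
// test, so no extra alignment is requested; pad == 8 yields 0 < 16 and passes, because
// a pad that small cannot be formatted as a free object; pad == 24 yields 16 < 16 and
// fails, because the pad is already large enough to hold a free object.)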
if ((size_t)(pad - DATA_ALIGNMENT) < Align (min_obj_size) - DATA_ALIGNMENT) { return requiredAlignment; } return 0; } inline uint8_t* StructAlign (uint8_t* origPtr, int requiredAlignment, ptrdiff_t alignmentOffset=OBJECT_ALIGNMENT_OFFSET) { // required alignment must be a power of two _ASSERTE(((size_t)origPtr & ALIGNCONST) == 0); _ASSERTE(((requiredAlignment - 1) & requiredAlignment) == 0); _ASSERTE(requiredAlignment >= sizeof(void *)); _ASSERTE(requiredAlignment <= MAX_STRUCTALIGN); // When this method is invoked for individual objects (i.e., alignmentOffset // is just the size of the PostHeader), what needs to be aligned when // we're done is the pointer to the payload of the object (which means // the actual resulting object pointer is typically not aligned). uint8_t* result = (uint8_t*)Align ((size_t)origPtr + alignmentOffset, requiredAlignment-1) - alignmentOffset; ptrdiff_t alignpad = result - origPtr; return result + AdjustmentForMinPadSize (alignpad, requiredAlignment); } inline ptrdiff_t ComputeStructAlignPad (uint8_t* plug, int requiredAlignment, size_t alignmentOffset=OBJECT_ALIGNMENT_OFFSET) { return StructAlign (plug, requiredAlignment, alignmentOffset) - plug; } BOOL IsStructAligned (uint8_t *ptr, int requiredAlignment) { return StructAlign (ptr, requiredAlignment) == ptr; } inline ptrdiff_t ComputeMaxStructAlignPad (int requiredAlignment) { if (requiredAlignment == DATA_ALIGNMENT) return 0; // Since a non-zero alignment padding cannot be less than min_obj_size (so we can fit the // alignment padding object), the worst-case alignment padding is correspondingly larger // than the required alignment. return requiredAlignment + Align (min_obj_size) - DATA_ALIGNMENT; } inline ptrdiff_t ComputeMaxStructAlignPadLarge (int requiredAlignment) { if (requiredAlignment <= get_alignment_constant (TRUE)+1) return 0; // This is the same as ComputeMaxStructAlignPad, except that in addition to leaving space // for padding before the actual object, it also leaves space for filling a gap after the // actual object. This is needed on the large object heap, as the outer allocation functions // don't operate on an allocation context (which would have left space for the final gap). 
return requiredAlignment + Align (min_obj_size) * 2 - DATA_ALIGNMENT; } uint8_t* gc_heap::pad_for_alignment (uint8_t* newAlloc, int requiredAlignment, size_t size, alloc_context* acontext) { uint8_t* alignedPtr = StructAlign (newAlloc, requiredAlignment); if (alignedPtr != newAlloc) { make_unused_array (newAlloc, alignedPtr - newAlloc); } acontext->alloc_ptr = alignedPtr + Align (size); return alignedPtr; } uint8_t* gc_heap::pad_for_alignment_large (uint8_t* newAlloc, int requiredAlignment, size_t size) { uint8_t* alignedPtr = StructAlign (newAlloc, requiredAlignment); if (alignedPtr != newAlloc) { make_unused_array (newAlloc, alignedPtr - newAlloc); } if (alignedPtr < newAlloc + ComputeMaxStructAlignPadLarge (requiredAlignment)) { make_unused_array (alignedPtr + AlignQword (size), newAlloc + ComputeMaxStructAlignPadLarge (requiredAlignment) - alignedPtr); } return alignedPtr; } #else // FEATURE_STRUCTALIGN #define ComputeMaxStructAlignPad(requiredAlignment) 0 #define ComputeMaxStructAlignPadLarge(requiredAlignment) 0 #endif // FEATURE_STRUCTALIGN //CLR_SIZE is the max amount of bytes from gen0 that is set to 0 in one chunk #ifdef SERVER_GC #define CLR_SIZE ((size_t)(8*1024)) #else //SERVER_GC #define CLR_SIZE ((size_t)(8*1024)) #endif //SERVER_GC #define END_SPACE_AFTER_GC (loh_size_threshold + MAX_STRUCTALIGN) // When we fit into the free list we need an extra of a min obj #define END_SPACE_AFTER_GC_FL (END_SPACE_AFTER_GC + Align (min_obj_size)) #if defined(BACKGROUND_GC) && !defined(USE_REGIONS) #define SEGMENT_INITIAL_COMMIT (2*OS_PAGE_SIZE) #else #define SEGMENT_INITIAL_COMMIT (OS_PAGE_SIZE) #endif //BACKGROUND_GC && !USE_REGIONS // This is always power of 2. const size_t min_segment_size_hard_limit = 1024*1024*16; inline size_t align_on_segment_hard_limit (size_t add) { return ((size_t)(add + (min_segment_size_hard_limit - 1)) & ~(min_segment_size_hard_limit - 1)); } #ifdef SERVER_GC #ifdef HOST_64BIT #define INITIAL_ALLOC ((size_t)((size_t)4*1024*1024*1024)) #define LHEAP_ALLOC ((size_t)(1024*1024*256)) #else #define INITIAL_ALLOC ((size_t)(1024*1024*64)) #define LHEAP_ALLOC ((size_t)(1024*1024*32)) #endif // HOST_64BIT #else //SERVER_GC #ifdef HOST_64BIT #define INITIAL_ALLOC ((size_t)(1024*1024*256)) #define LHEAP_ALLOC ((size_t)(1024*1024*128)) #else #define INITIAL_ALLOC ((size_t)(1024*1024*16)) #define LHEAP_ALLOC ((size_t)(1024*1024*16)) #endif // HOST_64BIT #endif //SERVER_GC const size_t etw_allocation_tick = 100*1024; const size_t low_latency_alloc = 256*1024; const size_t fgn_check_quantum = 2*1024*1024; #ifdef MH_SC_MARK const int max_snoop_level = 128; #endif //MH_SC_MARK #ifdef CARD_BUNDLE //threshold of heap size to turn on card bundles. 
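// (Each card bundle bit summarizes a whole run of card-table words, so card scanning
// can skip large clean stretches with a single test; the extra level of bookkeeping
// only pays for itself once the heap crosses the thresholds below.)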
#define SH_TH_CARD_BUNDLE (40*1024*1024) #define MH_TH_CARD_BUNDLE (180*1024*1024) #endif //CARD_BUNDLE // min size to decommit to make the OS call worthwhile #define MIN_DECOMMIT_SIZE (100*OS_PAGE_SIZE) // max size to decommit per millisecond #define DECOMMIT_SIZE_PER_MILLISECOND (160*1024) // time in milliseconds between decommit steps #define DECOMMIT_TIME_STEP_MILLISECONDS (100) inline size_t align_on_page (size_t add) { return ((add + OS_PAGE_SIZE - 1) & ~((size_t)OS_PAGE_SIZE - 1)); } inline uint8_t* align_on_page (uint8_t* add) { return (uint8_t*)align_on_page ((size_t) add); } inline size_t align_lower_page (size_t add) { return (add & ~((size_t)OS_PAGE_SIZE - 1)); } inline uint8_t* align_lower_page (uint8_t* add) { return (uint8_t*)align_lower_page ((size_t)add); } inline size_t align_write_watch_lower_page (size_t add) { return (add & ~(WRITE_WATCH_UNIT_SIZE - 1)); } inline uint8_t* align_write_watch_lower_page (uint8_t* add) { return (uint8_t*)align_lower_page ((size_t)add); } inline BOOL power_of_two_p (size_t integer) { return !(integer & (integer-1)); } inline BOOL oddp (size_t integer) { return (integer & 1) != 0; } // we only ever use this for WORDs. size_t logcount (size_t word) { //counts the number of high bits in a 16 bit word. assert (word < 0x10000); size_t count; count = (word & 0x5555) + ( (word >> 1 ) & 0x5555); count = (count & 0x3333) + ( (count >> 2) & 0x3333); count = (count & 0x0F0F) + ( (count >> 4) & 0x0F0F); count = (count & 0x00FF) + ( (count >> 8) & 0x00FF); return count; } void stomp_write_barrier_resize(bool is_runtime_suspended, bool requires_upper_bounds_check) { WriteBarrierParameters args = {}; args.operation = WriteBarrierOp::StompResize; args.is_runtime_suspended = is_runtime_suspended; args.requires_upper_bounds_check = requires_upper_bounds_check; args.card_table = g_gc_card_table; #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES args.card_bundle_table = g_gc_card_bundle_table; #endif args.lowest_address = g_gc_lowest_address; args.highest_address = g_gc_highest_address; #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP if (SoftwareWriteWatch::IsEnabledForGCHeap()) { args.write_watch_table = g_gc_sw_ww_table; } #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP GCToEEInterface::StompWriteBarrier(&args); } void stomp_write_barrier_ephemeral(uint8_t* ephemeral_low, uint8_t* ephemeral_high) { initGCShadow(); WriteBarrierParameters args = {}; args.operation = WriteBarrierOp::StompEphemeral; args.is_runtime_suspended = true; args.ephemeral_low = ephemeral_low; args.ephemeral_high = ephemeral_high; GCToEEInterface::StompWriteBarrier(&args); } void stomp_write_barrier_initialize(uint8_t* ephemeral_low, uint8_t* ephemeral_high) { WriteBarrierParameters args = {}; args.operation = WriteBarrierOp::Initialize; args.is_runtime_suspended = true; args.requires_upper_bounds_check = false; args.card_table = g_gc_card_table; #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES args.card_bundle_table = g_gc_card_bundle_table; #endif args.lowest_address = g_gc_lowest_address; args.highest_address = g_gc_highest_address; args.ephemeral_low = ephemeral_low; args.ephemeral_high = ephemeral_high; GCToEEInterface::StompWriteBarrier(&args); } //extract the low bits [0,low[ of a uint32_t #define lowbits(wrd, bits) ((wrd) & ((1 << (bits))-1)) //extract the high bits [high, 32] of a uint32_t #define highbits(wrd, bits) ((wrd) & ~((1 << (bits))-1)) // Things we need to manually initialize: // gen0 min_size - based on cache // gen0/1 max_size - based on segment size static 
static_data static_data_table[latency_level_last - latency_level_first + 1][total_generation_count] = { // latency_level_memory_footprint { // gen0 {0, 0, 40000, 0.5f, 9.0f, 20.0f, (1000 * 1000), 1}, // gen1 {160*1024, 0, 80000, 0.5f, 2.0f, 7.0f, (10 * 1000 * 1000), 10}, // gen2 {256*1024, SSIZE_T_MAX, 200000, 0.25f, 1.2f, 1.8f, (100 * 1000 * 1000), 100}, // loh {3*1024*1024, SSIZE_T_MAX, 0, 0.0f, 1.25f, 4.5f, 0, 0}, // poh {3*1024*1024, SSIZE_T_MAX, 0, 0.0f, 1.25f, 4.5f, 0, 0}, }, // latency_level_balanced { // gen0 {0, 0, 40000, 0.5f, #ifdef MULTIPLE_HEAPS 20.0f, 40.0f, #else 9.0f, 20.0f, #endif //MULTIPLE_HEAPS (1000 * 1000), 1}, // gen1 {256*1024, 0, 80000, 0.5f, 2.0f, 7.0f, (10 * 1000 * 1000), 10}, // gen2 {256*1024, SSIZE_T_MAX, 200000, 0.25f, 1.2f, 1.8f, (100 * 1000 * 1000), 100}, // loh {3*1024*1024, SSIZE_T_MAX, 0, 0.0f, 1.25f, 4.5f, 0, 0}, // poh {3*1024*1024, SSIZE_T_MAX, 0, 0.0f, 1.25f, 4.5f, 0, 0} }, }; class mark; class generation; class heap_segment; class CObjectHeader; class dynamic_data; class l_heap; class sorted_table; class c_synchronize; #ifdef FEATURE_PREMORTEM_FINALIZATION static HRESULT AllocateCFinalize(CFinalize **pCFinalize); #endif // FEATURE_PREMORTEM_FINALIZATION uint8_t* tree_search (uint8_t* tree, uint8_t* old_address); #ifdef USE_INTROSORT #define _sort introsort::sort #elif defined(USE_VXSORT) // in this case we have do_vxsort which takes an additional range that // all items to be sorted are contained in // so do not #define _sort #else //USE_INTROSORT #define _sort qsort1 void qsort1(uint8_t** low, uint8_t** high, unsigned int depth); #endif //USE_INTROSORT void* virtual_alloc (size_t size); void* virtual_alloc (size_t size, bool use_large_pages_p, uint16_t numa_node = NUMA_NODE_UNDEFINED); /* per heap static initialization */ #if defined(BACKGROUND_GC) && !defined(MULTIPLE_HEAPS) uint32_t* gc_heap::mark_array; #endif //BACKGROUND_GC && !MULTIPLE_HEAPS uint8_t** gc_heap::g_mark_list; uint8_t** gc_heap::g_mark_list_copy; size_t gc_heap::mark_list_size; bool gc_heap::mark_list_overflow; #ifdef USE_REGIONS uint8_t*** gc_heap::g_mark_list_piece; size_t gc_heap::g_mark_list_piece_size; #endif //USE_REGIONS seg_mapping* seg_mapping_table; #ifdef FEATURE_BASICFREEZE sorted_table* gc_heap::seg_table; #endif //FEATURE_BASICFREEZE #ifdef MULTIPLE_HEAPS GCEvent gc_heap::ee_suspend_event; size_t gc_heap::min_gen0_balance_delta = 0; size_t gc_heap::min_balance_threshold = 0; #endif //MULTIPLE_HEAPS VOLATILE(BOOL) gc_heap::gc_started; #ifdef MULTIPLE_HEAPS GCEvent gc_heap::gc_start_event; bool gc_heap::gc_thread_no_affinitize_p = false; uintptr_t process_mask = 0; int gc_heap::n_heaps; gc_heap** gc_heap::g_heaps; #if !defined(USE_REGIONS) || defined(_DEBUG) size_t* gc_heap::g_promoted; #endif //!USE_REGIONS || _DEBUG #ifdef MH_SC_MARK int* gc_heap::g_mark_stack_busy; #endif //MH_SC_MARK #ifdef BACKGROUND_GC size_t* gc_heap::g_bpromoted; #endif //BACKGROUND_GC BOOL gc_heap::gradual_decommit_in_progress_p = FALSE; size_t gc_heap::max_decommit_step_size = 0; #else //MULTIPLE_HEAPS #if !defined(USE_REGIONS) || defined(_DEBUG) size_t gc_heap::g_promoted; #endif //!USE_REGIONS || _DEBUG #ifdef BACKGROUND_GC size_t gc_heap::g_bpromoted; #endif //BACKGROUND_GC #endif //MULTIPLE_HEAPS size_t gc_heap::card_table_element_layout[total_bookkeeping_elements + 1]; #ifdef USE_REGIONS uint8_t* gc_heap::bookkeeping_covered_start = nullptr; uint8_t* gc_heap::bookkeeping_covered_committed = nullptr; size_t gc_heap::bookkeeping_sizes[total_bookkeeping_elements]; #endif //USE_REGIONS 
size_t gc_heap::reserved_memory = 0; size_t gc_heap::reserved_memory_limit = 0; BOOL gc_heap::g_low_memory_status; static gc_reason gc_trigger_reason = reason_empty; gc_latency_level gc_heap::latency_level = latency_level_default; gc_mechanisms gc_heap::settings; gc_history_global gc_heap::gc_data_global; uint64_t gc_heap::gc_last_ephemeral_decommit_time = 0; CLRCriticalSection gc_heap::check_commit_cs; size_t gc_heap::current_total_committed = 0; size_t gc_heap::committed_by_oh[total_oh_count] = {0, 0, 0, 0}; size_t gc_heap::current_total_committed_bookkeeping = 0; #ifdef FEATURE_EVENT_TRACE bool gc_heap::informational_event_enabled_p = false; uint64_t* gc_heap::gc_time_info = 0; #ifdef BACKGROUND_GC uint64_t* gc_heap::bgc_time_info = 0; #endif //BACKGROUND_GC size_t gc_heap::physical_memory_from_config = 0; size_t gc_heap::gen0_min_budget_from_config = 0; size_t gc_heap::gen0_max_budget_from_config = 0; int gc_heap::high_mem_percent_from_config = 0; bool gc_heap::use_frozen_segments_p = false; bool gc_heap::hard_limit_config_p = false; #ifdef FEATURE_LOH_COMPACTION gc_heap::etw_loh_compact_info* gc_heap::loh_compact_info; #endif //FEATURE_LOH_COMPACTION #endif //FEATURE_EVENT_TRACE #ifdef SHORT_PLUGS double gc_heap::short_plugs_pad_ratio = 0; #endif //SHORT_PLUGS int gc_heap::generation_skip_ratio_threshold = 0; int gc_heap::conserve_mem_setting = 0; uint64_t gc_heap::suspended_start_time = 0; uint64_t gc_heap::end_gc_time = 0; uint64_t gc_heap::total_suspended_time = 0; uint64_t gc_heap::process_start_time = 0; last_recorded_gc_info gc_heap::last_ephemeral_gc_info; last_recorded_gc_info gc_heap::last_full_blocking_gc_info; #ifdef BACKGROUND_GC last_recorded_gc_info gc_heap::last_bgc_info[2]; VOLATILE(bool) gc_heap::is_last_recorded_bgc = false; VOLATILE(int) gc_heap::last_bgc_info_index = 0; #endif //BACKGROUND_GC #if defined(HOST_64BIT) #define MAX_ALLOWED_MEM_LOAD 85 // consider putting this in dynamic data - // we may want different values for workstation // and server GC. 
#define MIN_YOUNGEST_GEN_DESIRED (16*1024*1024) size_t gc_heap::youngest_gen_desired_th; #endif //HOST_64BIT uint64_t gc_heap::mem_one_percent = 0; uint32_t gc_heap::high_memory_load_th = 0; uint32_t gc_heap::m_high_memory_load_th; uint32_t gc_heap::v_high_memory_load_th; bool gc_heap::is_restricted_physical_mem; uint64_t gc_heap::total_physical_mem = 0; uint64_t gc_heap::entry_available_physical_mem = 0; size_t gc_heap::heap_hard_limit = 0; size_t gc_heap::heap_hard_limit_oh[total_oh_count - 1] = {0, 0, 0}; #ifdef USE_REGIONS size_t gc_heap::regions_range = 0; #endif //USE_REGIONS bool affinity_config_specified_p = false; #ifdef USE_REGIONS region_allocator global_region_allocator; uint8_t*(*initial_regions)[total_generation_count][2] = nullptr; size_t gc_heap::region_count = 0; #endif //USE_REGIONS #ifdef BACKGROUND_GC GCEvent gc_heap::bgc_start_event; gc_mechanisms gc_heap::saved_bgc_settings; gc_history_global gc_heap::bgc_data_global; GCEvent gc_heap::background_gc_done_event; GCEvent gc_heap::ee_proceed_event; bool gc_heap::gc_can_use_concurrent = false; bool gc_heap::temp_disable_concurrent_p = false; uint32_t gc_heap::cm_in_progress = FALSE; BOOL gc_heap::dont_restart_ee_p = FALSE; BOOL gc_heap::keep_bgc_threads_p = FALSE; GCEvent gc_heap::bgc_threads_sync_event; BOOL gc_heap::do_ephemeral_gc_p = FALSE; BOOL gc_heap::do_concurrent_p = FALSE; size_t gc_heap::ephemeral_fgc_counts[max_generation]; BOOL gc_heap::alloc_wait_event_p = FALSE; VOLATILE(c_gc_state) gc_heap::current_c_gc_state = c_gc_state_free; VOLATILE(BOOL) gc_heap::gc_background_running = FALSE; #endif //BACKGROUND_GC #ifndef MULTIPLE_HEAPS #ifdef SPINLOCK_HISTORY int gc_heap::spinlock_info_index = 0; spinlock_info gc_heap::last_spinlock_info[max_saved_spinlock_info + 8]; #endif //SPINLOCK_HISTORY uint32_t gc_heap::fgn_maxgen_percent = 0; size_t gc_heap::fgn_last_alloc = 0; int gc_heap::generation_skip_ratio = 100; #ifdef FEATURE_CARD_MARKING_STEALING VOLATILE(size_t) gc_heap::n_eph_soh = 0; VOLATILE(size_t) gc_heap::n_gen_soh = 0; VOLATILE(size_t) gc_heap::n_eph_loh = 0; VOLATILE(size_t) gc_heap::n_gen_loh = 0; #endif //FEATURE_CARD_MARKING_STEALING uint64_t gc_heap::loh_alloc_since_cg = 0; BOOL gc_heap::elevation_requested = FALSE; BOOL gc_heap::last_gc_before_oom = FALSE; BOOL gc_heap::sufficient_gen0_space_p = FALSE; #ifdef BACKGROUND_GC uint8_t* gc_heap::background_saved_lowest_address = 0; uint8_t* gc_heap::background_saved_highest_address = 0; uint8_t* gc_heap::next_sweep_obj = 0; uint8_t* gc_heap::current_sweep_pos = 0; #ifdef DOUBLY_LINKED_FL heap_segment* gc_heap::current_sweep_seg = 0; #endif //DOUBLY_LINKED_FL exclusive_sync* gc_heap::bgc_alloc_lock; #endif //BACKGROUND_GC oom_history gc_heap::oom_info; int gc_heap::oomhist_index_per_heap = 0; oom_history gc_heap::oomhist_per_heap[max_oom_history_count]; fgm_history gc_heap::fgm_result; size_t gc_heap::allocated_since_last_gc[gc_oh_num::total_oh_count - 1]; BOOL gc_heap::ro_segments_in_range; #ifndef USE_REGIONS uint8_t* gc_heap::ephemeral_low; uint8_t* gc_heap::ephemeral_high; #endif //!USE_REGIONS uint8_t* gc_heap::lowest_address; uint8_t* gc_heap::highest_address; BOOL gc_heap::ephemeral_promotion; uint8_t* gc_heap::saved_ephemeral_plan_start[ephemeral_generation_count]; size_t gc_heap::saved_ephemeral_plan_start_size[ephemeral_generation_count]; short* gc_heap::brick_table; uint32_t* gc_heap::card_table; #ifdef CARD_BUNDLE uint32_t* gc_heap::card_bundle_table; #endif //CARD_BUNDLE uint8_t* gc_heap::gc_low = 0; uint8_t* gc_heap::gc_high = 0; #ifndef 
USE_REGIONS uint8_t* gc_heap::demotion_low; uint8_t* gc_heap::demotion_high; #endif //!USE_REGIONS BOOL gc_heap::demote_gen1_p = TRUE; uint8_t* gc_heap::last_gen1_pin_end; gen_to_condemn_tuning gc_heap::gen_to_condemn_reasons; size_t gc_heap::etw_allocation_running_amount[gc_oh_num::total_oh_count - 1]; uint64_t gc_heap::total_alloc_bytes_soh = 0; uint64_t gc_heap::total_alloc_bytes_uoh = 0; int gc_heap::gc_policy = 0; size_t gc_heap::allocation_running_time; size_t gc_heap::allocation_running_amount; heap_segment* gc_heap::ephemeral_heap_segment = 0; #ifdef USE_REGIONS #ifdef STRESS_REGIONS OBJECTHANDLE* gc_heap::pinning_handles_for_alloc = 0; int gc_heap::ph_index_per_heap = 0; int gc_heap::pinning_seg_interval = 2; size_t gc_heap::num_gen0_regions = 0; int gc_heap::sip_seg_interval = 0; int gc_heap::sip_seg_maxgen_interval = 0; size_t gc_heap::num_condemned_regions = 0; #endif //STRESS_REGIONS region_free_list gc_heap::free_regions[count_free_region_kinds]; int gc_heap::num_regions_freed_in_sweep = 0; int gc_heap::regions_per_gen[max_generation + 1]; int gc_heap::sip_maxgen_regions_per_gen[max_generation + 1]; heap_segment* gc_heap::reserved_free_regions_sip[max_generation]; int gc_heap::num_sip_regions = 0; size_t gc_heap::end_gen0_region_space = 0; size_t gc_heap::gen0_pinned_free_space = 0; bool gc_heap::gen0_large_chunk_found = false; size_t* gc_heap::survived_per_region = nullptr; size_t* gc_heap::old_card_survived_per_region = nullptr; #endif //USE_REGIONS BOOL gc_heap::blocking_collection = FALSE; heap_segment* gc_heap::freeable_uoh_segment = 0; uint64_t gc_heap::time_bgc_last = 0; size_t gc_heap::mark_stack_tos = 0; size_t gc_heap::mark_stack_bos = 0; size_t gc_heap::mark_stack_array_length = 0; mark* gc_heap::mark_stack_array = 0; #if defined (_DEBUG) && defined (VERIFY_HEAP) BOOL gc_heap::verify_pinned_queue_p = FALSE; #endif //_DEBUG && VERIFY_HEAP uint8_t* gc_heap::oldest_pinned_plug = 0; size_t gc_heap::num_pinned_objects = 0; #ifdef FEATURE_LOH_COMPACTION size_t gc_heap::loh_pinned_queue_tos = 0; size_t gc_heap::loh_pinned_queue_bos = 0; size_t gc_heap::loh_pinned_queue_length = 0; mark* gc_heap::loh_pinned_queue = 0; BOOL gc_heap::loh_compacted_p = FALSE; #endif //FEATURE_LOH_COMPACTION #ifdef BACKGROUND_GC EEThreadId gc_heap::bgc_thread_id; uint8_t* gc_heap::background_written_addresses [array_size+2]; heap_segment* gc_heap::freeable_soh_segment = 0; size_t gc_heap::bgc_overflow_count = 0; size_t gc_heap::bgc_begin_loh_size = 0; size_t gc_heap::end_loh_size = 0; size_t gc_heap::bgc_begin_poh_size = 0; size_t gc_heap::end_poh_size = 0; #ifdef BGC_SERVO_TUNING uint64_t gc_heap::loh_a_no_bgc = 0; uint64_t gc_heap::loh_a_bgc_marking = 0; uint64_t gc_heap::loh_a_bgc_planning = 0; size_t gc_heap::bgc_maxgen_end_fl_size = 0; #endif //BGC_SERVO_TUNING uint32_t gc_heap::bgc_alloc_spin_uoh = 0; size_t gc_heap::bgc_loh_size_increased = 0; size_t gc_heap::bgc_poh_size_increased = 0; size_t gc_heap::background_soh_size_end_mark = 0; size_t gc_heap::background_soh_alloc_count = 0; size_t gc_heap::background_uoh_alloc_count = 0; uint8_t** gc_heap::background_mark_stack_tos = 0; uint8_t** gc_heap::background_mark_stack_array = 0; size_t gc_heap::background_mark_stack_array_length = 0; BOOL gc_heap::processed_eph_overflow_p = FALSE; #ifdef USE_REGIONS BOOL gc_heap::background_overflow_p = FALSE; #else //USE_REGIONS uint8_t* gc_heap::background_min_overflow_address =0; uint8_t* gc_heap::background_max_overflow_address =0; uint8_t* gc_heap::background_min_soh_overflow_address =0; uint8_t* 
gc_heap::background_max_soh_overflow_address =0; heap_segment* gc_heap::saved_overflow_ephemeral_seg = 0; heap_segment* gc_heap::saved_sweep_ephemeral_seg = 0; uint8_t* gc_heap::saved_sweep_ephemeral_start = 0; #endif //USE_REGIONS Thread* gc_heap::bgc_thread = 0; uint8_t** gc_heap::c_mark_list = 0; size_t gc_heap::c_mark_list_length = 0; size_t gc_heap::c_mark_list_index = 0; gc_history_per_heap gc_heap::bgc_data_per_heap; BOOL gc_heap::bgc_thread_running; CLRCriticalSection gc_heap::bgc_threads_timeout_cs; #endif //BACKGROUND_GC uint8_t** gc_heap::mark_list; uint8_t** gc_heap::mark_list_index; uint8_t** gc_heap::mark_list_end; #ifdef SNOOP_STATS snoop_stats_data gc_heap::snoop_stat; #endif //SNOOP_STATS uint8_t* gc_heap::min_overflow_address = MAX_PTR; uint8_t* gc_heap::max_overflow_address = 0; uint8_t* gc_heap::shigh = 0; uint8_t* gc_heap::slow = MAX_PTR; size_t gc_heap::ordered_free_space_indices[MAX_NUM_BUCKETS]; size_t gc_heap::saved_ordered_free_space_indices[MAX_NUM_BUCKETS]; size_t gc_heap::ordered_plug_indices[MAX_NUM_BUCKETS]; size_t gc_heap::saved_ordered_plug_indices[MAX_NUM_BUCKETS]; BOOL gc_heap::ordered_plug_indices_init = FALSE; BOOL gc_heap::use_bestfit = FALSE; uint8_t* gc_heap::bestfit_first_pin = 0; BOOL gc_heap::commit_end_of_seg = FALSE; size_t gc_heap::max_free_space_items = 0; size_t gc_heap::free_space_buckets = 0; size_t gc_heap::free_space_items = 0; int gc_heap::trimmed_free_space_index = 0; size_t gc_heap::total_ephemeral_plugs = 0; seg_free_spaces* gc_heap::bestfit_seg = 0; size_t gc_heap::total_ephemeral_size = 0; #ifdef HEAP_ANALYZE size_t gc_heap::internal_root_array_length = initial_internal_roots; uint8_t** gc_heap::internal_root_array = 0; size_t gc_heap::internal_root_array_index = 0; BOOL gc_heap::heap_analyze_success = TRUE; uint8_t* gc_heap::current_obj = 0; size_t gc_heap::current_obj_size = 0; #endif //HEAP_ANALYZE #ifdef GC_CONFIG_DRIVEN size_t gc_heap::interesting_data_per_gc[max_idp_count]; //size_t gc_heap::interesting_data_per_heap[max_idp_count]; //size_t gc_heap::interesting_mechanisms_per_heap[max_im_count]; #endif //GC_CONFIG_DRIVEN #endif //MULTIPLE_HEAPS no_gc_region_info gc_heap::current_no_gc_region_info; BOOL gc_heap::proceed_with_gc_p = FALSE; GCSpinLock gc_heap::gc_lock; #ifdef BGC_SERVO_TUNING uint64_t gc_heap::total_loh_a_last_bgc = 0; #endif //BGC_SERVO_TUNING size_t gc_heap::eph_gen_starts_size = 0; heap_segment* gc_heap::segment_standby_list; #if defined(USE_REGIONS) region_free_list gc_heap::global_regions_to_decommit[count_free_region_kinds]; region_free_list gc_heap::global_free_huge_regions; #endif //USE_REGIONS bool gc_heap::use_large_pages_p = 0; #ifdef HEAP_BALANCE_INSTRUMENTATION size_t gc_heap::last_gc_end_time_us = 0; #endif //HEAP_BALANCE_INSTRUMENTATION #ifndef USE_REGIONS size_t gc_heap::min_segment_size = 0; size_t gc_heap::min_uoh_segment_size = 0; #endif //!USE_REGIONS size_t gc_heap::min_segment_size_shr = 0; size_t gc_heap::soh_segment_size = 0; size_t gc_heap::segment_info_size = 0; #ifdef GC_CONFIG_DRIVEN size_t gc_heap::compact_or_sweep_gcs[2]; #endif //GC_CONFIG_DRIVEN #ifdef FEATURE_LOH_COMPACTION BOOL gc_heap::loh_compaction_always_p = FALSE; gc_loh_compaction_mode gc_heap::loh_compaction_mode = loh_compaction_default; int gc_heap::loh_pinned_queue_decay = LOH_PIN_DECAY; #endif //FEATURE_LOH_COMPACTION GCEvent gc_heap::full_gc_approach_event; GCEvent gc_heap::full_gc_end_event; uint32_t gc_heap::fgn_loh_percent = 0; #ifdef BACKGROUND_GC BOOL gc_heap::fgn_last_gc_was_concurrent = FALSE; #endif 
//BACKGROUND_GC VOLATILE(bool) gc_heap::full_gc_approach_event_set; bool gc_heap::special_sweep_p = false; size_t gc_heap::full_gc_counts[gc_type_max]; bool gc_heap::maxgen_size_inc_p = false; BOOL gc_heap::should_expand_in_full_gc = FALSE; // Provisional mode related stuff. bool gc_heap::provisional_mode_triggered = false; bool gc_heap::pm_trigger_full_gc = false; size_t gc_heap::provisional_triggered_gc_count = 0; size_t gc_heap::provisional_off_gc_count = 0; size_t gc_heap::num_provisional_triggered = 0; bool gc_heap::pm_stress_on = false; #ifdef HEAP_ANALYZE BOOL gc_heap::heap_analyze_enabled = FALSE; #endif //HEAP_ANALYZE #ifndef MULTIPLE_HEAPS alloc_list gc_heap::loh_alloc_list [NUM_LOH_ALIST-1]; alloc_list gc_heap::gen2_alloc_list[NUM_GEN2_ALIST-1]; alloc_list gc_heap::poh_alloc_list [NUM_POH_ALIST-1]; #ifdef DOUBLY_LINKED_FL // size we removed with no undo; only for recording purpose size_t gc_heap::gen2_removed_no_undo = 0; size_t gc_heap::saved_pinned_plug_index = INVALID_SAVED_PINNED_PLUG_INDEX; #endif //DOUBLY_LINKED_FL #ifdef FEATURE_EVENT_TRACE etw_bucket_info gc_heap::bucket_info[NUM_GEN2_ALIST]; #endif //FEATURE_EVENT_TRACE dynamic_data gc_heap::dynamic_data_table [total_generation_count]; gc_history_per_heap gc_heap::gc_data_per_heap; size_t gc_heap::total_promoted_bytes = 0; size_t gc_heap::finalization_promoted_bytes = 0; size_t gc_heap::maxgen_pinned_compact_before_advance = 0; uint8_t* gc_heap::alloc_allocated = 0; size_t gc_heap::allocation_quantum = CLR_SIZE; GCSpinLock gc_heap::more_space_lock_soh; GCSpinLock gc_heap::more_space_lock_uoh; #ifdef BACKGROUND_GC VOLATILE(int32_t) gc_heap::uoh_alloc_thread_count = 0; #endif //BACKGROUND_GC #ifdef SYNCHRONIZATION_STATS unsigned int gc_heap::good_suspension = 0; unsigned int gc_heap::bad_suspension = 0; uint64_t gc_heap::total_msl_acquire = 0; unsigned int gc_heap::num_msl_acquired = 0; unsigned int gc_heap::num_high_msl_acquire = 0; unsigned int gc_heap::num_low_msl_acquire = 0; #endif //SYNCHRONIZATION_STATS size_t gc_heap::alloc_contexts_used = 0; size_t gc_heap::soh_allocation_no_gc = 0; size_t gc_heap::loh_allocation_no_gc = 0; bool gc_heap::no_gc_oom_p = false; heap_segment* gc_heap::saved_loh_segment_no_gc = 0; #endif //MULTIPLE_HEAPS #ifndef MULTIPLE_HEAPS BOOL gc_heap::gen0_bricks_cleared = FALSE; int gc_heap::gen0_must_clear_bricks = 0; #ifdef FEATURE_PREMORTEM_FINALIZATION CFinalize* gc_heap::finalize_queue = 0; #endif // FEATURE_PREMORTEM_FINALIZATION #ifdef FEATURE_CARD_MARKING_STEALING VOLATILE(uint32_t) gc_heap::card_mark_chunk_index_soh; VOLATILE(bool) gc_heap::card_mark_done_soh; VOLATILE(uint32_t) gc_heap::card_mark_chunk_index_loh; VOLATILE(uint32_t) gc_heap::card_mark_chunk_index_poh; VOLATILE(bool) gc_heap::card_mark_done_uoh; #endif // FEATURE_CARD_MARKING_STEALING generation gc_heap::generation_table [total_generation_count]; size_t gc_heap::interesting_data_per_heap[max_idp_count]; size_t gc_heap::compact_reasons_per_heap[max_compact_reasons_count]; size_t gc_heap::expand_mechanisms_per_heap[max_expand_mechanisms_count]; size_t gc_heap::interesting_mechanism_bits_per_heap[max_gc_mechanism_bits_count]; #endif // MULTIPLE_HEAPS /* end of per heap static initialization */ // budget smoothing size_t gc_heap::smoothed_desired_per_heap[total_generation_count]; /* end of static initialization */ // This is for methods that need to iterate through all SOH heap segments/regions. 
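// (With USE_REGIONS every SOH generation owns its own list of regions, so such walks
// must start at generation 0; with classic segments all SOH generations share
// max_generation's segment list, so starting at max_generation already covers them.
// A hypothetical caller - compiled out, showing only the loop shape used throughout
// this file - looks like this:)
#if 0
for (int i = get_start_generation_index(); i <= max_generation; i++)
{
    heap_segment* seg = heap_segment_rw (generation_start_segment (generation_of (i)));
    while (seg)
    {
        // ... visit seg ...
        seg = heap_segment_next_rw (seg);
    }
}
#endif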
inline int get_start_generation_index() { #ifdef USE_REGIONS return 0; #else return max_generation; #endif //USE_REGIONS } inline int get_stop_generation_index (int condemned_gen_number) { #ifdef USE_REGIONS return 0; #else return condemned_gen_number; #endif //USE_REGIONS } void gen_to_condemn_tuning::print (int heap_num) { #ifdef DT_LOG dprintf (DT_LOG_0, ("condemned reasons (%d %d)", condemn_reasons_gen, condemn_reasons_condition)); dprintf (DT_LOG_0, ("%s", record_condemn_reasons_gen_header)); gc_condemn_reason_gen r_gen; for (int i = 0; i < gcrg_max; i++) { r_gen = (gc_condemn_reason_gen)(i); str_reasons_gen[i * 2] = get_gen_char (get_gen (r_gen)); } dprintf (DT_LOG_0, ("[%2d]%s", heap_num, str_reasons_gen)); dprintf (DT_LOG_0, ("%s", record_condemn_reasons_condition_header)); gc_condemn_reason_condition r_condition; for (int i = 0; i < gcrc_max; i++) { r_condition = (gc_condemn_reason_condition)(i); str_reasons_condition[i * 2] = get_condition_char (get_condition (r_condition)); } dprintf (DT_LOG_0, ("[%2d]%s", heap_num, str_reasons_condition)); #else UNREFERENCED_PARAMETER(heap_num); #endif //DT_LOG } void gc_generation_data::print (int heap_num, int gen_num) { #if defined(SIMPLE_DPRINTF) && defined(DT_LOG) dprintf (DT_LOG_0, ("[%2d]gen%d beg %Id fl %Id fo %Id end %Id fl %Id fo %Id in %Id p %Id np %Id alloc %Id", heap_num, gen_num, size_before, free_list_space_before, free_obj_space_before, size_after, free_list_space_after, free_obj_space_after, in, pinned_surv, npinned_surv, new_allocation)); #else UNREFERENCED_PARAMETER(heap_num); UNREFERENCED_PARAMETER(gen_num); #endif //SIMPLE_DPRINTF && DT_LOG } void gc_history_per_heap::set_mechanism (gc_mechanism_per_heap mechanism_per_heap, uint32_t value) { uint32_t* mechanism = &mechanisms[mechanism_per_heap]; *mechanism = 0; *mechanism |= mechanism_mask; *mechanism |= (1 << value); #ifdef DT_LOG gc_mechanism_descr* descr = &gc_mechanisms_descr[mechanism_per_heap]; dprintf (DT_LOG_0, ("setting %s: %s", descr->name, (descr->descr)[value])); #endif //DT_LOG } void gc_history_per_heap::print() { #if defined(SIMPLE_DPRINTF) && defined(DT_LOG) for (int i = 0; i < (sizeof (gen_data)/sizeof (gc_generation_data)); i++) { gen_data[i].print (heap_index, i); } dprintf (DT_LOG_0, ("fla %Id flr %Id esa %Id ca %Id pa %Id paa %Id, rfle %d, ec %Id", maxgen_size_info.free_list_allocated, maxgen_size_info.free_list_rejected, maxgen_size_info.end_seg_allocated, maxgen_size_info.condemned_allocated, maxgen_size_info.pinned_allocated, maxgen_size_info.pinned_allocated_advance, maxgen_size_info.running_free_list_efficiency, extra_gen0_committed)); int mechanism = 0; gc_mechanism_descr* descr = 0; for (int i = 0; i < max_mechanism_per_heap; i++) { mechanism = get_mechanism ((gc_mechanism_per_heap)i); if (mechanism >= 0) { descr = &gc_mechanisms_descr[(gc_mechanism_per_heap)i]; dprintf (DT_LOG_0, ("[%2d]%s%s", heap_index, descr->name, (descr->descr)[mechanism])); } } #endif //SIMPLE_DPRINTF && DT_LOG } void gc_history_global::print() { #ifdef DT_LOG char str_settings[64]; memset (str_settings, '|', sizeof (char) * 64); str_settings[max_global_mechanisms_count*2] = 0; for (int i = 0; i < max_global_mechanisms_count; i++) { str_settings[i * 2] = (get_mechanism_p ((gc_global_mechanism_p)i) ? 
'Y' : 'N'); } dprintf (DT_LOG_0, ("[hp]|c|p|o|d|b|e|")); dprintf (DT_LOG_0, ("%4d|%s", num_heaps, str_settings)); dprintf (DT_LOG_0, ("Condemned gen%d(reason: %s; mode: %s), youngest budget %Id(%d), memload %d", condemned_generation, str_gc_reasons[reason], str_gc_pause_modes[pause_mode], final_youngest_desired, gen0_reduction_count, mem_pressure)); #endif //DT_LOG } uint32_t limit_time_to_uint32 (uint64_t time) { time = min (time, UINT32_MAX); return (uint32_t)time; } void gc_heap::fire_per_heap_hist_event (gc_history_per_heap* current_gc_data_per_heap, int heap_num) { maxgen_size_increase* maxgen_size_info = &(current_gc_data_per_heap->maxgen_size_info); FIRE_EVENT(GCPerHeapHistory_V3, (void *)(maxgen_size_info->free_list_allocated), (void *)(maxgen_size_info->free_list_rejected), (void *)(maxgen_size_info->end_seg_allocated), (void *)(maxgen_size_info->condemned_allocated), (void *)(maxgen_size_info->pinned_allocated), (void *)(maxgen_size_info->pinned_allocated_advance), maxgen_size_info->running_free_list_efficiency, current_gc_data_per_heap->gen_to_condemn_reasons.get_reasons0(), current_gc_data_per_heap->gen_to_condemn_reasons.get_reasons1(), current_gc_data_per_heap->mechanisms[gc_heap_compact], current_gc_data_per_heap->mechanisms[gc_heap_expand], current_gc_data_per_heap->heap_index, (void *)(current_gc_data_per_heap->extra_gen0_committed), total_generation_count, (uint32_t)(sizeof (gc_generation_data)), (void *)&(current_gc_data_per_heap->gen_data[0])); current_gc_data_per_heap->print(); current_gc_data_per_heap->gen_to_condemn_reasons.print (heap_num); } void gc_heap::fire_pevents() { gc_history_global* current_gc_data_global = get_gc_data_global(); settings.record (current_gc_data_global); current_gc_data_global->print(); #ifdef FEATURE_EVENT_TRACE if (!informational_event_enabled_p) return; uint32_t count_time_info = (settings.concurrent ? max_bgc_time_type : (settings.compaction ? max_compact_time_type : max_sweep_time_type)); #ifdef BACKGROUND_GC uint64_t* time_info = (settings.concurrent ? bgc_time_info : gc_time_info); #else uint64_t* time_info = gc_time_info; #endif //BACKGROUND_GC // We don't want to have to fire the time info as 64-bit integers as there's no need to // so compress them down to 32-bit ones. 
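// (The packing below is done in place: 32-bit slot i lives inside 64-bit slot i/2,
// which has already been read by the time it is overwritten, so no value is clobbered
// before it is consumed.)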
uint32_t* time_info_32 = (uint32_t*)time_info; for (uint32_t i = 0; i < count_time_info; i++) { time_info_32[i] = limit_time_to_uint32 (time_info[i]); } FIRE_EVENT(GCGlobalHeapHistory_V4, current_gc_data_global->final_youngest_desired, current_gc_data_global->num_heaps, current_gc_data_global->condemned_generation, current_gc_data_global->gen0_reduction_count, current_gc_data_global->reason, current_gc_data_global->global_mechanisms_p, current_gc_data_global->pause_mode, current_gc_data_global->mem_pressure, current_gc_data_global->gen_to_condemn_reasons.get_reasons0(), current_gc_data_global->gen_to_condemn_reasons.get_reasons1(), count_time_info, (uint32_t)(sizeof (uint32_t)), (void*)time_info_32); #ifdef MULTIPLE_HEAPS for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; gc_history_per_heap* current_gc_data_per_heap = hp->get_gc_data_per_heap(); fire_per_heap_hist_event (current_gc_data_per_heap, hp->heap_number); } #else gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap(); fire_per_heap_hist_event (current_gc_data_per_heap, heap_number); #endif //MULTIPLE_HEAPS #ifdef FEATURE_LOH_COMPACTION if (!settings.concurrent && settings.loh_compaction) { // Not every heap will compact LOH, the ones that didn't will just have 0s // in its info. FIRE_EVENT(GCLOHCompact, get_num_heaps(), (uint32_t)(sizeof (etw_loh_compact_info)), (void *)loh_compact_info); } #endif //FEATURE_LOH_COMPACTION #endif //FEATURE_EVENT_TRACE } inline BOOL gc_heap::dt_low_ephemeral_space_p (gc_tuning_point tp) { BOOL ret = FALSE; switch (tp) { case tuning_deciding_condemned_gen: #ifndef USE_REGIONS case tuning_deciding_compaction: case tuning_deciding_expansion: #endif //USE_REGIONS case tuning_deciding_full_gc: { ret = (!ephemeral_gen_fit_p (tp)); break; } #ifndef USE_REGIONS case tuning_deciding_promote_ephemeral: { size_t new_gen0size = approximate_new_allocation(); ptrdiff_t plan_ephemeral_size = total_ephemeral_size; dprintf (GTC_LOG, ("h%d: plan eph size is %Id, new gen0 is %Id", heap_number, plan_ephemeral_size, new_gen0size)); // If we were in no_gc_region we could have allocated a larger than normal segment, // and the next seg we allocate will be a normal sized seg so if we can't fit the new // ephemeral generations there, do an ephemeral promotion. 
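// (That is: promote if a standard-size segment, minus its metadata, could not hold the
// planned ephemeral generations plus the next gen0 budget.)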
ret = ((soh_segment_size - segment_info_size) < (plan_ephemeral_size + new_gen0size)); break; } #endif //USE_REGIONS default: { assert (!"invalid tuning reason"); break; } } return ret; } BOOL gc_heap::dt_high_frag_p (gc_tuning_point tp, int gen_number, BOOL elevate_p) { BOOL ret = FALSE; switch (tp) { case tuning_deciding_condemned_gen: { dynamic_data* dd = dynamic_data_of (gen_number); float fragmentation_burden = 0; if (elevate_p) { ret = (dd_fragmentation (dynamic_data_of (max_generation)) >= dd_max_size(dd)); dprintf (GTC_LOG, ("h%d: frag is %Id, max size is %Id", heap_number, dd_fragmentation (dd), dd_max_size(dd))); } else { #ifndef MULTIPLE_HEAPS if (gen_number == max_generation) { float frag_ratio = (float)(dd_fragmentation (dynamic_data_of (max_generation))) / (float)generation_size (max_generation); if (frag_ratio > 0.65) { dprintf (GTC_LOG, ("g2 FR: %d%%", (int)(frag_ratio*100))); return TRUE; } } #endif //!MULTIPLE_HEAPS size_t fr = generation_unusable_fragmentation (generation_of (gen_number)); ret = (fr > dd_fragmentation_limit(dd)); if (ret) { fragmentation_burden = (float)fr / generation_size (gen_number); ret = (fragmentation_burden > dd_v_fragmentation_burden_limit (dd)); } dprintf (GTC_LOG, ("h%d: gen%d, frag is %Id, alloc effi: %d%%, unusable frag is %Id, ratio is %d", heap_number, gen_number, dd_fragmentation (dd), (int)(100*generation_allocator_efficiency (generation_of (gen_number))), fr, (int)(fragmentation_burden*100))); } break; } default: break; } return ret; } inline BOOL gc_heap::dt_estimate_reclaim_space_p (gc_tuning_point tp, int gen_number) { BOOL ret = FALSE; switch (tp) { case tuning_deciding_condemned_gen: { if (gen_number == max_generation) { size_t est_maxgen_free = estimated_reclaim (gen_number); uint32_t num_heaps = 1; #ifdef MULTIPLE_HEAPS num_heaps = gc_heap::n_heaps; #endif //MULTIPLE_HEAPS size_t min_frag_th = min_reclaim_fragmentation_threshold (num_heaps); dprintf (GTC_LOG, ("h%d, min frag is %Id", heap_number, min_frag_th)); ret = (est_maxgen_free >= min_frag_th); } else { assert (0); } break; } default: break; } return ret; } // DTREVIEW: Right now we only estimate gen2 fragmentation. 
// on 64-bit though we should consider gen1 or even gen0 fragmentation as // well inline BOOL gc_heap::dt_estimate_high_frag_p (gc_tuning_point tp, int gen_number, uint64_t available_mem) { BOOL ret = FALSE; switch (tp) { case tuning_deciding_condemned_gen: { if (gen_number == max_generation) { dynamic_data* dd = dynamic_data_of (gen_number); float est_frag_ratio = 0; if (dd_current_size (dd) == 0) { est_frag_ratio = 1; } else if ((dd_fragmentation (dd) == 0) || (dd_fragmentation (dd) + dd_current_size (dd) == 0)) { est_frag_ratio = 0; } else { est_frag_ratio = (float)dd_fragmentation (dd) / (float)(dd_fragmentation (dd) + dd_current_size (dd)); } size_t est_frag = (dd_fragmentation (dd) + (size_t)((dd_desired_allocation (dd) - dd_new_allocation (dd)) * est_frag_ratio)); dprintf (GTC_LOG, ("h%d: gen%d: current_size is %Id, frag is %Id, est_frag_ratio is %d%%, estimated frag is %Id", heap_number, gen_number, dd_current_size (dd), dd_fragmentation (dd), (int)(est_frag_ratio*100), est_frag)); uint32_t num_heaps = 1; #ifdef MULTIPLE_HEAPS num_heaps = gc_heap::n_heaps; #endif //MULTIPLE_HEAPS uint64_t min_frag_th = min_high_fragmentation_threshold(available_mem, num_heaps); //dprintf (GTC_LOG, ("h%d, min frag is %I64d", heap_number, min_frag_th)); ret = (est_frag >= min_frag_th); } else { assert (0); } break; } default: break; } return ret; } inline BOOL gc_heap::dt_low_card_table_efficiency_p (gc_tuning_point tp) { BOOL ret = FALSE; switch (tp) { case tuning_deciding_condemned_gen: { /* promote into max-generation if the card table has too many * generation faults besides the n -> 0 */ ret = (generation_skip_ratio < generation_skip_ratio_threshold); break; } default: break; } return ret; } inline BOOL gc_heap::dt_high_memory_load_p() { return ((settings.entry_memory_load >= high_memory_load_th) || g_low_memory_status); } inline BOOL in_range_for_segment(uint8_t* add, heap_segment* seg) { return ((add >= heap_segment_mem (seg)) && (add < heap_segment_reserved (seg))); } #ifdef FEATURE_BASICFREEZE // The array we allocate is organized as follows: // 0th element is the address of the last array we allocated. // starting from the 1st element are the segment addresses, that's // what buckets() returns. struct bk { uint8_t* add; size_t val; }; class sorted_table { private: ptrdiff_t size; ptrdiff_t count; bk* slots; bk* buckets() { return (slots + 1); } uint8_t*& last_slot (bk* arr) { return arr[0].add; } bk* old_slots; public: static sorted_table* make_sorted_table (); BOOL insert (uint8_t* add, size_t val);; size_t lookup (uint8_t*& add); void remove (uint8_t* add); void clear (); void delete_sorted_table(); void delete_old_slots(); void enqueue_old_slot(bk* sl); BOOL ensure_space_for_insert(); }; sorted_table* sorted_table::make_sorted_table () { size_t size = 400; // allocate one more bk to store the older slot address. 
sorted_table* res = (sorted_table*)new (nothrow) char [sizeof (sorted_table) + (size + 1) * sizeof (bk)]; if (!res) return 0; res->size = size; res->slots = (bk*)(res + 1); res->old_slots = 0; res->clear(); return res; } void sorted_table::delete_sorted_table() { if (slots != (bk*)(this+1)) { delete slots; } delete_old_slots(); delete this; } void sorted_table::delete_old_slots() { uint8_t* sl = (uint8_t*)old_slots; while (sl) { uint8_t* dsl = sl; sl = last_slot ((bk*)sl); delete dsl; } old_slots = 0; } void sorted_table::enqueue_old_slot(bk* sl) { last_slot (sl) = (uint8_t*)old_slots; old_slots = sl; } inline size_t sorted_table::lookup (uint8_t*& add) { ptrdiff_t high = (count-1); ptrdiff_t low = 0; ptrdiff_t ti; ptrdiff_t mid; bk* buck = buckets(); while (low <= high) { mid = ((low + high)/2); ti = mid; if (buck[ti].add > add) { if ((ti > 0) && (buck[ti-1].add <= add)) { add = buck[ti-1].add; return buck[ti - 1].val; } high = mid - 1; } else { if (buck[ti+1].add > add) { add = buck[ti].add; return buck[ti].val; } low = mid + 1; } } add = 0; return 0; } BOOL sorted_table::ensure_space_for_insert() { if (count == size) { size = (size * 3)/2; assert((size * sizeof (bk)) > 0); bk* res = (bk*)new (nothrow) char [(size + 1) * sizeof (bk)]; assert (res); if (!res) return FALSE; last_slot (res) = 0; memcpy (((bk*)res + 1), buckets(), count * sizeof (bk)); bk* last_old_slots = slots; slots = res; if (last_old_slots != (bk*)(this + 1)) enqueue_old_slot (last_old_slots); } return TRUE; } BOOL sorted_table::insert (uint8_t* add, size_t val) { //grow if no more room assert (count < size); //insert sorted ptrdiff_t high = (count-1); ptrdiff_t low = 0; ptrdiff_t ti; ptrdiff_t mid; bk* buck = buckets(); while (low <= high) { mid = ((low + high)/2); ti = mid; if (buck[ti].add > add) { if ((ti == 0) || (buck[ti-1].add <= add)) { // found insertion point for (ptrdiff_t k = count; k > ti;k--) { buck [k] = buck [k-1]; } buck[ti].add = add; buck[ti].val = val; count++; return TRUE; } high = mid - 1; } else { if (buck[ti+1].add > add) { //found the insertion point for (ptrdiff_t k = count; k > ti+1;k--) { buck [k] = buck [k-1]; } buck[ti+1].add = add; buck[ti+1].val = val; count++; return TRUE; } low = mid + 1; } } assert (0); return TRUE; } void sorted_table::remove (uint8_t* add) { ptrdiff_t high = (count-1); ptrdiff_t low = 0; ptrdiff_t ti; ptrdiff_t mid; bk* buck = buckets(); while (low <= high) { mid = ((low + high)/2); ti = mid; if (buck[ti].add > add) { if (buck[ti-1].add <= add) { for (ptrdiff_t k = ti; k < count; k++) buck[k-1] = buck[k]; count--; return; } high = mid - 1; } else { if (buck[ti+1].add > add) { for (ptrdiff_t k = ti+1; k < count; k++) buck[k-1] = buck[k]; count--; return; } low = mid + 1; } } assert (0); } void sorted_table::clear() { count = 1; buckets()[0].add = MAX_PTR; } #endif //FEATURE_BASICFREEZE #ifdef USE_REGIONS inline size_t get_basic_region_index_for_address (uint8_t* address) { size_t basic_region_index = (size_t)address >> gc_heap::min_segment_size_shr; return (basic_region_index - ((size_t)g_gc_lowest_address >> gc_heap::min_segment_size_shr)); } // Go from a random address to its region info. The random address could be // in one of the basic regions of a larger region so we need to check for that. 
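// (For a region made of several basic units, the seg_mapping_table entries of the
// trailing units store, in their heap_segment_allocated field, a negative offset back
// to the first unit; adding that offset lands on the entry describing the whole
// region, which is what the first_field check below relies on.)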
inline heap_segment* get_region_info_for_address (uint8_t* address) { size_t basic_region_index = (size_t)address >> gc_heap::min_segment_size_shr; heap_segment* basic_region_info_entry = (heap_segment*)&seg_mapping_table[basic_region_index]; ptrdiff_t first_field = (ptrdiff_t)heap_segment_allocated (basic_region_info_entry); if (first_field < 0) { basic_region_index += first_field; } return ((heap_segment*)(&seg_mapping_table[basic_region_index])); } // Go from the physical start of a region to its region info. inline heap_segment* get_region_info (uint8_t* region_start) { size_t region_index = (size_t)region_start >> gc_heap::min_segment_size_shr; heap_segment* region_info_entry = (heap_segment*)&seg_mapping_table[region_index]; dprintf (REGIONS_LOG, ("region info for region %Ix is at %Id, %Ix (alloc: %Ix)", region_start, region_index, (size_t)region_info_entry, heap_segment_allocated (region_info_entry))); return (heap_segment*)&seg_mapping_table[region_index]; } // Go from the actual region info to its region start. inline uint8_t* get_region_start (heap_segment* region_info) { uint8_t* obj_start = heap_segment_mem (region_info); return (obj_start - sizeof (aligned_plug_and_gap)); } inline size_t get_region_size (heap_segment* region_info) { return (size_t)(heap_segment_reserved (region_info) - get_region_start (region_info)); } inline size_t get_region_committed_size (heap_segment* region) { uint8_t* start = get_region_start (region); uint8_t* committed = heap_segment_committed (region); return committed - start; } inline bool is_free_region (heap_segment* region) { return (heap_segment_allocated (region) == nullptr); } bool region_allocator::init (uint8_t* start, uint8_t* end, size_t alignment, uint8_t** lowest, uint8_t** highest) { uint8_t* actual_start = start; region_alignment = alignment; large_region_alignment = LARGE_REGION_FACTOR * alignment; global_region_start = (uint8_t*)align_region_up ((size_t)actual_start); uint8_t* actual_end = end; global_region_end = (uint8_t*)align_region_down ((size_t)actual_end); global_region_left_used = global_region_start; global_region_right_used = global_region_end; // Note: I am allocating a map that covers the whole reserved range. // We can optimize it to only cover the current heap range. 
size_t total_num_units = (global_region_end - global_region_start) / region_alignment; total_free_units = (uint32_t)total_num_units; uint32_t* unit_map = new (nothrow) uint32_t[total_num_units]; if (unit_map) { memset (unit_map, 0, sizeof (uint32_t) * total_num_units); region_map_left_start = unit_map; region_map_left_end = region_map_left_start; region_map_right_start = unit_map + total_num_units; region_map_right_end = region_map_right_start; dprintf (REGIONS_LOG, ("start: %Ix, end: %Ix, total %Idmb(alignment: %Idmb), map units %d", (size_t)start, (size_t)end, (size_t)((end - start) / 1024 / 1024), (alignment / 1024 / 1024), total_num_units)); *lowest = global_region_start; *highest = global_region_end; } return (unit_map != 0); } inline uint8_t* region_allocator::region_address_of (uint32_t* map_index) { return (global_region_start + ((map_index - region_map_left_start) * region_alignment)); } inline uint32_t* region_allocator::region_map_index_of (uint8_t* address) { return (region_map_left_start + ((address - global_region_start) / region_alignment)); } void region_allocator::make_busy_block (uint32_t* index_start, uint32_t num_units) { #ifdef _DEBUG dprintf (REGIONS_LOG, ("MBB[B: %Id] %d->%d", (size_t)num_units, (int)(index_start - region_map_left_start), (int)(index_start - region_map_left_start + num_units))); #endif //_DEBUG ASSERT_HOLDING_SPIN_LOCK (&region_allocator_lock); uint32_t* index_end = index_start + (num_units - 1); *index_start = *index_end = num_units; } void region_allocator::make_free_block (uint32_t* index_start, uint32_t num_units) { #ifdef _DEBUG dprintf (REGIONS_LOG, ("MFB[F: %Id] %d->%d", (size_t)num_units, (int)(index_start - region_map_left_start), (int)(index_start - region_map_left_start + num_units))); #endif //_DEBUG ASSERT_HOLDING_SPIN_LOCK (&region_allocator_lock); uint32_t* index_end = index_start + (num_units - 1); *index_start = *index_end = region_alloc_free_bit | num_units; } void region_allocator::print_map (const char* msg) { ASSERT_HOLDING_SPIN_LOCK (&region_allocator_lock); #ifdef _DEBUG const char* heap_type = "UH"; dprintf (REGIONS_LOG, ("[%s]-----printing----%s", heap_type, msg)); uint32_t* current_index = region_map_left_start; uint32_t* end_index = region_map_left_end; uint32_t count_free_units = 0; for (int i = 0; i < 2; i++) { while (current_index < end_index) { uint32_t current_val = *current_index; uint32_t current_num_units = get_num_units (current_val); bool free_p = is_unit_memory_free (current_val); dprintf (REGIONS_LOG, ("[%s][%s: %Id]%d->%d", heap_type, (free_p ? 
"F" : "B"), (size_t)current_num_units, (int)(current_index - region_map_left_start), (int)(current_index - region_map_left_start + current_num_units))); if (free_p) { count_free_units += current_num_units; } current_index += current_num_units; } current_index = region_map_right_start; end_index = region_map_right_end; } count_free_units += (uint32_t)(region_map_right_start - region_map_left_end); assert(count_free_units == total_free_units); uint32_t total_regions = (uint32_t)((global_region_end - global_region_start) / region_alignment); dprintf (REGIONS_LOG, ("[%s]-----end printing----[%d total, left used %d, right used %d]\n", heap_type, total_regions, (region_map_left_end - region_map_left_start), (region_map_right_end - region_map_right_start))); #endif //_DEBUG } uint8_t* region_allocator::allocate_end (uint32_t num_units, allocate_direction direction) { uint8_t* alloc = NULL; ASSERT_HOLDING_SPIN_LOCK (&region_allocator_lock); if (global_region_left_used < global_region_right_used) { size_t end_remaining = global_region_right_used - global_region_left_used; if ((end_remaining / region_alignment) >= num_units) { if (direction == allocate_forward) { make_busy_block (region_map_left_end, num_units); region_map_left_end += num_units; alloc = global_region_left_used; global_region_left_used += num_units * region_alignment; } else { assert(direction == allocate_backward); region_map_right_start -= num_units; make_busy_block (region_map_right_start, num_units); global_region_right_used -= num_units * region_alignment; alloc = global_region_right_used; } } } return alloc; } void region_allocator::enter_spin_lock() { while (true) { if (Interlocked::CompareExchange(&region_allocator_lock.lock, 0, -1) < 0) break; while (region_allocator_lock.lock >= 0) { YieldProcessor(); // indicate to the processor that we are spinning } } #ifdef _DEBUG region_allocator_lock.holding_thread = GCToEEInterface::GetThread(); #endif //_DEBUG } void region_allocator::leave_spin_lock() { #ifdef _DEBUG region_allocator_lock.holding_thread = (Thread*)-1; #endif //_DEBUG region_allocator_lock.lock = -1; } uint8_t* region_allocator::allocate (uint32_t num_units, allocate_direction direction, region_allocator_callback_fn fn) { enter_spin_lock(); uint32_t* current_index; uint32_t* end_index; if (direction == allocate_forward) { current_index = region_map_left_start; end_index = region_map_left_end; } else { assert(direction == allocate_backward); current_index = region_map_right_end; end_index = region_map_right_start; } dprintf (REGIONS_LOG, ("searching %d->%d", (int)(current_index - region_map_left_start), (int)(end_index - region_map_left_start))); print_map ("before alloc"); while (((direction == allocate_forward) && (current_index < end_index)) || ((direction == allocate_backward) && (current_index > end_index))) { uint32_t current_val = *(current_index - ((direction == -1) ? 1 : 0)); uint32_t current_num_units = get_num_units (current_val); bool free_p = is_unit_memory_free (current_val); dprintf (REGIONS_LOG, ("ALLOC[%s: %Id]%d->%d", (free_p ? 
"F" : "B"), (size_t)current_num_units, (int)(current_index - region_map_left_start), (int)(current_index + current_num_units - region_map_left_start))); if (free_p) { if (current_num_units >= num_units) { dprintf (REGIONS_LOG, ("found %Id contiguous free units(%d->%d), sufficient", (size_t)current_num_units, (int)(current_index - region_map_left_start), (int)(current_index - region_map_left_start + current_num_units))); uint32_t* busy_block; uint32_t* free_block; if (direction == 1) { busy_block = current_index; free_block = current_index + num_units; } else { busy_block = current_index - num_units; free_block = current_index - current_num_units; } make_busy_block (busy_block, num_units); if ((current_num_units - num_units) > 0) { make_free_block (free_block, (current_num_units - num_units)); } total_free_units -= num_units; print_map ("alloc: found in free"); leave_spin_lock(); return region_address_of (busy_block); } } if (direction == allocate_forward) { current_index += current_num_units; } else { current_index -= current_num_units; } } uint8_t* alloc = allocate_end (num_units, direction); if (alloc) { total_free_units -= num_units; if (fn != nullptr) { if (!fn (global_region_left_used)) { delete_region_impl (alloc); alloc = nullptr; } } if (alloc) { print_map ("alloc: found at the end"); } } else { dprintf (REGIONS_LOG, ("couldn't find memory at the end! only %Id bytes left", (global_region_right_used - global_region_left_used))); } leave_spin_lock(); return alloc; } // ETW TODO: need to fire create seg events for these methods. // FIRE_EVENT(GCCreateSegment_V1 bool region_allocator::allocate_region (size_t size, uint8_t** start, uint8_t** end, allocate_direction direction, region_allocator_callback_fn fn) { size_t alignment = region_alignment; size_t alloc_size = align_region_up (size); uint32_t num_units = (uint32_t)(alloc_size / alignment); bool ret = false; uint8_t* alloc = NULL; dprintf (REGIONS_LOG, ("----GET %d-----", num_units)); alloc = allocate (num_units, direction, fn); *start = alloc; *end = alloc + alloc_size; ret = (alloc != NULL); return ret; } bool region_allocator::allocate_basic_region (uint8_t** start, uint8_t** end, region_allocator_callback_fn fn) { return allocate_region (region_alignment, start, end, allocate_forward, fn); } // Large regions are 8x basic region sizes by default. If you need a larger region than that, // call allocate_region with the size. 
bool region_allocator::allocate_large_region (uint8_t** start, uint8_t** end, allocate_direction direction, size_t size, region_allocator_callback_fn fn) { if (size == 0) size = large_region_alignment; else { // round up size to a multiple of large_region_alignment // for the below computation to work, large_region_alignment must be a power of 2 assert (round_up_power2(large_region_alignment) == large_region_alignment); size = (size + (large_region_alignment - 1)) & ~(large_region_alignment - 1); } return allocate_region (size, start, end, direction, fn); } void region_allocator::delete_region (uint8_t* region_start) { enter_spin_lock(); delete_region_impl (region_start); leave_spin_lock(); } void region_allocator::delete_region_impl (uint8_t* region_start) { ASSERT_HOLDING_SPIN_LOCK (&region_allocator_lock); assert (is_region_aligned (region_start)); print_map ("before delete"); uint32_t* current_index = region_map_index_of (region_start); uint32_t current_val = *current_index; assert (!is_unit_memory_free (current_val)); dprintf (REGIONS_LOG, ("----DEL %d (%u units)-----", (current_index - region_map_left_start), current_val)); uint32_t* region_end_index = current_index + current_val; uint8_t* region_end = region_address_of (region_end_index); int free_block_size = current_val; uint32_t* free_index = current_index; if ((current_index != region_map_left_start) && (current_index != region_map_right_start)) { uint32_t previous_val = *(current_index - 1); if (is_unit_memory_free(previous_val)) { uint32_t previous_size = get_num_units (previous_val); free_index -= previous_size; free_block_size += previous_size; } } if ((region_end != global_region_left_used) && (region_end != global_region_end)) { uint32_t next_val = *region_end_index; if (is_unit_memory_free(next_val)) { uint32_t next_size = get_num_units (next_val); free_block_size += next_size; region_end += next_size; } } if (region_end == global_region_left_used) { region_map_left_end = free_index; dprintf (REGIONS_LOG, ("adjust global left used from %Ix to %Ix", global_region_left_used, region_address_of (free_index))); global_region_left_used = region_address_of (free_index); } else if (region_start == global_region_right_used) { region_map_right_start = free_index + free_block_size; dprintf (REGIONS_LOG, ("adjust global right used from %Ix to %Ix", global_region_right_used, region_address_of (free_index + free_block_size))); global_region_right_used = region_address_of (free_index + free_block_size); } else { make_free_block (free_index, free_block_size); } total_free_units += current_val; print_map ("after delete"); } void region_allocator::move_highest_free_regions (int64_t n, bool small_region_p, region_free_list to_free_list[count_free_region_kinds]) { assert (n > 0); uint32_t* current_index = region_map_left_end - 1; uint32_t* lowest_index = region_map_left_start; while (current_index >= lowest_index) { uint32_t current_val = *current_index; uint32_t current_num_units = get_num_units (current_val); bool free_p = is_unit_memory_free (current_val); if (!free_p && ((current_num_units == 1) == small_region_p)) { uint32_t* index = current_index - (current_num_units - 1); heap_segment* region = get_region_info (region_address_of (index)); if (is_free_region (region)) { if (n >= current_num_units) { n -= current_num_units; region_free_list::unlink_region (region); region_free_list::add_region (region, to_free_list); } else { break; } } } current_index -= current_num_units; } } #endif //USE_REGIONS inline uint8_t* align_on_segment 
(uint8_t* add) { return (uint8_t*)((size_t)(add + (((size_t)1 << gc_heap::min_segment_size_shr) - 1)) & ~(((size_t)1 << gc_heap::min_segment_size_shr) - 1)); } inline uint8_t* align_lower_segment (uint8_t* add) { return (uint8_t*)((size_t)(add) & ~(((size_t)1 << gc_heap::min_segment_size_shr) - 1)); } size_t size_seg_mapping_table_of (uint8_t* from, uint8_t* end) { from = align_lower_segment (from); end = align_on_segment (end); dprintf (1, ("from: %Ix, end: %Ix, size: %Ix", from, end, sizeof (seg_mapping)*(((size_t)(end - from) >> gc_heap::min_segment_size_shr)))); return sizeof (seg_mapping)*((size_t)(end - from) >> gc_heap::min_segment_size_shr); } inline size_t seg_mapping_word_of (uint8_t* add) { return (size_t)add >> gc_heap::min_segment_size_shr; } #ifdef FEATURE_BASICFREEZE inline size_t ro_seg_begin_index (heap_segment* seg) { #ifdef USE_REGIONS size_t begin_index = (size_t)heap_segment_mem (seg) >> gc_heap::min_segment_size_shr; #else size_t begin_index = (size_t)seg >> gc_heap::min_segment_size_shr; #endif //USE_REGIONS begin_index = max (begin_index, (size_t)g_gc_lowest_address >> gc_heap::min_segment_size_shr); return begin_index; } inline size_t ro_seg_end_index (heap_segment* seg) { size_t end_index = (size_t)(heap_segment_reserved (seg) - 1) >> gc_heap::min_segment_size_shr; end_index = min (end_index, (size_t)g_gc_highest_address >> gc_heap::min_segment_size_shr); return end_index; } void seg_mapping_table_add_ro_segment (heap_segment* seg) { if ((heap_segment_reserved (seg) <= g_gc_lowest_address) || (heap_segment_mem (seg) >= g_gc_highest_address)) return; for (size_t entry_index = ro_seg_begin_index (seg); entry_index <= ro_seg_end_index (seg); entry_index++) { #ifdef USE_REGIONS heap_segment* region = (heap_segment*)&seg_mapping_table[entry_index]; heap_segment_allocated (region) = (uint8_t*)ro_in_entry; #else seg_mapping_table[entry_index].seg1 = (heap_segment*)((size_t)seg_mapping_table[entry_index].seg1 | ro_in_entry); #endif //USE_REGIONS } } void seg_mapping_table_remove_ro_segment (heap_segment* seg) { UNREFERENCED_PARAMETER(seg); #if 0 // POSSIBLE PERF TODO: right now we are not doing anything because we can't simply remove the flag. If it proves // to be a perf problem, we can search in the current ro segs and see if any lands in this range and only // remove the flag if none lands in this range. #endif //0 } heap_segment* ro_segment_lookup (uint8_t* o) { uint8_t* ro_seg_start = o; heap_segment* seg = (heap_segment*)gc_heap::seg_table->lookup (ro_seg_start); if (ro_seg_start && in_range_for_segment (o, seg)) return seg; else return 0; } #endif //FEATURE_BASICFREEZE void gc_heap::seg_mapping_table_add_segment (heap_segment* seg, gc_heap* hp) { #ifndef USE_REGIONS size_t seg_end = (size_t)(heap_segment_reserved (seg) - 1); size_t begin_index = (size_t)seg >> gc_heap::min_segment_size_shr; seg_mapping* begin_entry = &seg_mapping_table[begin_index]; size_t end_index = seg_end >> gc_heap::min_segment_size_shr; seg_mapping* end_entry = &seg_mapping_table[end_index]; dprintf (2, ("adding seg %Ix(%d)-%Ix(%d)", seg, begin_index, heap_segment_reserved (seg), end_index)); dprintf (2, ("before add: begin entry%d: boundary: %Ix; end entry: %d: boundary: %Ix", begin_index, (seg_mapping_table[begin_index].boundary + 1), end_index, (seg_mapping_table[end_index].boundary + 1))); #ifdef MULTIPLE_HEAPS #ifdef SIMPLE_DPRINTF dprintf (2, ("begin %d: h0: %Ix(%d), h1: %Ix(%d); end %d: h0: %Ix(%d), h1: %Ix(%d)", begin_index, (uint8_t*)(begin_entry->h0), (begin_entry->h0 ? 
begin_entry->h0->heap_number : -1), (uint8_t*)(begin_entry->h1), (begin_entry->h1 ? begin_entry->h1->heap_number : -1), end_index, (uint8_t*)(end_entry->h0), (end_entry->h0 ? end_entry->h0->heap_number : -1), (uint8_t*)(end_entry->h1), (end_entry->h1 ? end_entry->h1->heap_number : -1))); #endif //SIMPLE_DPRINTF assert (end_entry->boundary == 0); assert (end_entry->h0 == 0); end_entry->h0 = hp; assert (begin_entry->h1 == 0); begin_entry->h1 = hp; #else UNREFERENCED_PARAMETER(hp); #endif //MULTIPLE_HEAPS end_entry->boundary = (uint8_t*)seg_end; dprintf (2, ("set entry %d seg1 and %d seg0 to %Ix", begin_index, end_index, seg)); assert ((begin_entry->seg1 == 0) || ((size_t)(begin_entry->seg1) == ro_in_entry)); begin_entry->seg1 = (heap_segment*)((size_t)(begin_entry->seg1) | (size_t)seg); end_entry->seg0 = seg; // for every entry inbetween we need to set its heap too. for (size_t entry_index = (begin_index + 1); entry_index <= (end_index - 1); entry_index++) { assert (seg_mapping_table[entry_index].boundary == 0); #ifdef MULTIPLE_HEAPS assert (seg_mapping_table[entry_index].h0 == 0); seg_mapping_table[entry_index].h1 = hp; #endif //MULTIPLE_HEAPS seg_mapping_table[entry_index].seg1 = seg; } dprintf (2, ("after add: begin entry%d: boundary: %Ix; end entry: %d: boundary: %Ix", begin_index, (seg_mapping_table[begin_index].boundary + 1), end_index, (seg_mapping_table[end_index].boundary + 1))); #if defined(MULTIPLE_HEAPS) && defined(SIMPLE_DPRINTF) dprintf (2, ("begin %d: h0: %Ix(%d), h1: %Ix(%d); end: %d h0: %Ix(%d), h1: %Ix(%d)", begin_index, (uint8_t*)(begin_entry->h0), (begin_entry->h0 ? begin_entry->h0->heap_number : -1), (uint8_t*)(begin_entry->h1), (begin_entry->h1 ? begin_entry->h1->heap_number : -1), end_index, (uint8_t*)(end_entry->h0), (end_entry->h0 ? end_entry->h0->heap_number : -1), (uint8_t*)(end_entry->h1), (end_entry->h1 ? end_entry->h1->heap_number : -1))); #endif //MULTIPLE_HEAPS && SIMPLE_DPRINTF #endif //!USE_REGIONS } void gc_heap::seg_mapping_table_remove_segment (heap_segment* seg) { #ifndef USE_REGIONS size_t seg_end = (size_t)(heap_segment_reserved (seg) - 1); size_t begin_index = (size_t)seg >> gc_heap::min_segment_size_shr; seg_mapping* begin_entry = &seg_mapping_table[begin_index]; size_t end_index = seg_end >> gc_heap::min_segment_size_shr; seg_mapping* end_entry = &seg_mapping_table[end_index]; dprintf (2, ("removing seg %Ix(%d)-%Ix(%d)", seg, begin_index, heap_segment_reserved (seg), end_index)); assert (end_entry->boundary == (uint8_t*)seg_end); end_entry->boundary = 0; #ifdef MULTIPLE_HEAPS gc_heap* hp = heap_segment_heap (seg); assert (end_entry->h0 == hp); end_entry->h0 = 0; assert (begin_entry->h1 == hp); begin_entry->h1 = 0; #endif //MULTIPLE_HEAPS assert (begin_entry->seg1 != 0); begin_entry->seg1 = (heap_segment*)((size_t)(begin_entry->seg1) & ro_in_entry); end_entry->seg0 = 0; // for every entry inbetween we need to reset its heap too. 
for (size_t entry_index = (begin_index + 1); entry_index <= (end_index - 1); entry_index++) { assert (seg_mapping_table[entry_index].boundary == 0); #ifdef MULTIPLE_HEAPS assert (seg_mapping_table[entry_index].h0 == 0); assert (seg_mapping_table[entry_index].h1 == hp); seg_mapping_table[entry_index].h1 = 0; #endif //MULTIPLE_HEAPS seg_mapping_table[entry_index].seg1 = 0; } dprintf (2, ("after remove: begin entry%d: boundary: %Ix; end entry: %d: boundary: %Ix", begin_index, (seg_mapping_table[begin_index].boundary + 1), end_index, (seg_mapping_table[end_index].boundary + 1))); #ifdef MULTIPLE_HEAPS dprintf (2, ("begin %d: h0: %Ix, h1: %Ix; end: %d h0: %Ix, h1: %Ix", begin_index, (uint8_t*)(begin_entry->h0), (uint8_t*)(begin_entry->h1), end_index, (uint8_t*)(end_entry->h0), (uint8_t*)(end_entry->h1))); #endif //MULTIPLE_HEAPS #endif //!USE_REGIONS } #ifdef MULTIPLE_HEAPS inline gc_heap* seg_mapping_table_heap_of_worker (uint8_t* o) { size_t index = (size_t)o >> gc_heap::min_segment_size_shr; seg_mapping* entry = &seg_mapping_table[index]; #ifdef USE_REGIONS gc_heap* hp = heap_segment_heap ((heap_segment*)entry); #else gc_heap* hp = ((o > entry->boundary) ? entry->h1 : entry->h0); dprintf (2, ("checking obj %Ix, index is %Id, entry: boundary: %Ix, h0: %Ix, seg0: %Ix, h1: %Ix, seg1: %Ix", o, index, (entry->boundary + 1), (uint8_t*)(entry->h0), (uint8_t*)(entry->seg0), (uint8_t*)(entry->h1), (uint8_t*)(entry->seg1))); #ifdef _DEBUG heap_segment* seg = ((o > entry->boundary) ? entry->seg1 : entry->seg0); #ifdef FEATURE_BASICFREEZE if ((size_t)seg & ro_in_entry) seg = (heap_segment*)((size_t)seg & ~ro_in_entry); #endif //FEATURE_BASICFREEZE #ifdef TRACE_GC if (seg) { if (in_range_for_segment (o, seg)) { dprintf (2, ("obj %Ix belongs to segment %Ix(-%Ix)", o, seg, (uint8_t*)heap_segment_allocated (seg))); } else { dprintf (2, ("found seg %Ix(-%Ix) for obj %Ix, but it's not on the seg", seg, (uint8_t*)heap_segment_allocated (seg), o)); } } else { dprintf (2, ("could not find obj %Ix in any existing segments", o)); } #endif //TRACE_GC #endif //_DEBUG #endif //USE_REGIONS return hp; } gc_heap* seg_mapping_table_heap_of (uint8_t* o) { if ((o < g_gc_lowest_address) || (o >= g_gc_highest_address)) return 0; return seg_mapping_table_heap_of_worker (o); } gc_heap* seg_mapping_table_heap_of_gc (uint8_t* o) { #ifdef FEATURE_BASICFREEZE if ((o < g_gc_lowest_address) || (o >= g_gc_highest_address)) return 0; #endif //FEATURE_BASICFREEZE return seg_mapping_table_heap_of_worker (o); } #endif //MULTIPLE_HEAPS // Only returns a valid seg if we can actually find o on the seg. heap_segment* seg_mapping_table_segment_of (uint8_t* o) { #ifdef FEATURE_BASICFREEZE if ((o < g_gc_lowest_address) || (o >= g_gc_highest_address)) return ro_segment_lookup (o); #endif //FEATURE_BASICFREEZE size_t index = (size_t)o >> gc_heap::min_segment_size_shr; seg_mapping* entry = &seg_mapping_table[index]; #ifdef USE_REGIONS // REGIONS TODO: I think we could simplify this to having the same info for each // basic entry in a large region so we can get it right away instead of having to go // back some entries. ptrdiff_t first_field = (ptrdiff_t)heap_segment_allocated ((heap_segment*)entry); if (first_field == 0) { dprintf (REGIONS_LOG, ("asked for seg for %Ix, in a freed region mem: %Ix, committed %Ix", o, heap_segment_mem ((heap_segment*)entry), heap_segment_committed ((heap_segment*)entry))); return 0; } // Regions are never going to intersect an ro seg, so this can never be ro_in_entry. 
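    // Descriptive note (added): for a region that spans several basic units, only the first
    // basic entry in seg_mapping_table carries the real region info; each later basic entry
    // stores a negative offset (in entries) in its heap_segment_allocated field pointing back
    // to that first entry. A negative first_field therefore means o landed in a later basic
    // unit of a larger region, and index += first_field below walks back to the entry that
    // actually describes it; get_region_info_for_address relies on the same encoding.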
assert (first_field != 0); assert (first_field != ro_in_entry); if (first_field < 0) { index += first_field; } heap_segment* seg = (heap_segment*)&seg_mapping_table[index]; #else //USE_REGIONS dprintf (2, ("checking obj %Ix, index is %Id, entry: boundary: %Ix, seg0: %Ix, seg1: %Ix", o, index, (entry->boundary + 1), (uint8_t*)(entry->seg0), (uint8_t*)(entry->seg1))); heap_segment* seg = ((o > entry->boundary) ? entry->seg1 : entry->seg0); #ifdef FEATURE_BASICFREEZE if ((size_t)seg & ro_in_entry) seg = (heap_segment*)((size_t)seg & ~ro_in_entry); #endif //FEATURE_BASICFREEZE #endif //USE_REGIONS if (seg) { if (in_range_for_segment (o, seg)) { dprintf (2, ("obj %Ix belongs to segment %Ix(-%Ix)", o, (uint8_t*)heap_segment_mem(seg), (uint8_t*)heap_segment_reserved(seg))); } else { dprintf (2, ("found seg %Ix(-%Ix) for obj %Ix, but it's not on the seg, setting it to 0", (uint8_t*)heap_segment_mem(seg), (uint8_t*)heap_segment_reserved(seg), o)); seg = 0; } } else { dprintf (2, ("could not find obj %Ix in any existing segments", o)); } #ifdef FEATURE_BASICFREEZE // TODO: This was originally written assuming that the seg_mapping_table would always contain entries for ro // segments whenever the ro segment falls into the [g_gc_lowest_address,g_gc_highest_address) range. I.e., it had an // extra "&& (size_t)(entry->seg1) & ro_in_entry" expression. However, at the moment, grow_brick_card_table does // not correctly go through the ro segments and add them back to the seg_mapping_table when the [lowest,highest) // range changes. We should probably go ahead and modify grow_brick_card_table and put back the // "&& (size_t)(entry->seg1) & ro_in_entry" here. if (!seg) { seg = ro_segment_lookup (o); if (seg && !in_range_for_segment (o, seg)) seg = 0; } #endif //FEATURE_BASICFREEZE return seg; } size_t gcard_of ( uint8_t*); #define GC_MARKED (size_t)0x1 #ifdef DOUBLY_LINKED_FL // This bit indicates that we'll need to set the bgc mark bit for this object during an FGC. // We only do this when we decide to compact. #define BGC_MARKED_BY_FGC (size_t)0x2 #define MAKE_FREE_OBJ_IN_COMPACT (size_t)0x4 #define ALLOWED_SPECIAL_HEADER_BITS (GC_MARKED|BGC_MARKED_BY_FGC|MAKE_FREE_OBJ_IN_COMPACT) #else //DOUBLY_LINKED_FL #define ALLOWED_SPECIAL_HEADER_BITS (GC_MARKED) #endif //!DOUBLY_LINKED_FL #ifdef HOST_64BIT #define SPECIAL_HEADER_BITS (0x7) #else #define SPECIAL_HEADER_BITS (0x3) #endif #define slot(i, j) ((uint8_t**)(i))[(j)+1] #define free_object_base_size (plug_skew + sizeof(ArrayBase)) #define free_list_slot(x) ((uint8_t**)(x))[2] #define free_list_undo(x) ((uint8_t**)(x))[-1] #define UNDO_EMPTY ((uint8_t*)1) #ifdef DOUBLY_LINKED_FL #define free_list_prev(x) ((uint8_t**)(x))[3] #define PREV_EMPTY ((uint8_t*)1) void check_and_clear_in_free_list (uint8_t* o, size_t size) { if (size >= min_free_list) { free_list_prev (o) = PREV_EMPTY; } } // This is used when we need to clear the prev bit for a free object we made because we know // it's not actually a free obj (it's just a temporary thing during allocation). void clear_prev_bit (uint8_t* o, size_t size) { if (size >= min_free_list) { free_list_prev (o) = 0; } } #endif //DOUBLY_LINKED_FL class CObjectHeader : public Object { public: #if defined(FEATURE_REDHAWK) || defined(BUILD_AS_STANDALONE) // The GC expects the following methods that are provided by the Object class in the CLR but not provided // by Redhawk's version of Object. 
uint32_t GetNumComponents() { return ((ArrayBase *)this)->GetNumComponents(); } void Validate(BOOL bDeep=TRUE) { MethodTable * pMT = GetMethodTable(); _ASSERTE(pMT->SanityCheck()); bool noRangeChecks = (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_NO_RANGE_CHECKS) == GCConfig::HEAPVERIFY_NO_RANGE_CHECKS; BOOL fSmallObjectHeapPtr = FALSE, fLargeObjectHeapPtr = FALSE; if (!noRangeChecks) { fSmallObjectHeapPtr = g_theGCHeap->IsHeapPointer(this, TRUE); if (!fSmallObjectHeapPtr) fLargeObjectHeapPtr = g_theGCHeap->IsHeapPointer(this); _ASSERTE(fSmallObjectHeapPtr || fLargeObjectHeapPtr); } #ifdef FEATURE_STRUCTALIGN _ASSERTE(IsStructAligned((uint8_t *)this, GetMethodTable()->GetBaseAlignment())); #endif // FEATURE_STRUCTALIGN #if defined(FEATURE_64BIT_ALIGNMENT) && !defined(FEATURE_REDHAWK) if (pMT->RequiresAlign8()) { _ASSERTE((((size_t)this) & 0x7) == (pMT->IsValueType() ? 4U : 0U)); } #endif // FEATURE_64BIT_ALIGNMENT #ifdef VERIFY_HEAP if (bDeep && (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)) g_theGCHeap->ValidateObjectMember(this); #endif if (fSmallObjectHeapPtr) { #ifdef FEATURE_BASICFREEZE _ASSERTE(!g_theGCHeap->IsLargeObject(this) || g_theGCHeap->IsInFrozenSegment(this)); #else _ASSERTE(!g_theGCHeap->IsLargeObject(this)); #endif } } void ValidateHeap(BOOL bDeep) { Validate(bDeep); } #endif //FEATURE_REDHAWK || BUILD_AS_STANDALONE ///// // // Header Status Information // MethodTable *GetMethodTable() const { return( (MethodTable *) (((size_t) RawGetMethodTable()) & (~SPECIAL_HEADER_BITS))); } void SetMarked() { _ASSERTE(RawGetMethodTable()); RawSetMethodTable((MethodTable *) (((size_t) RawGetMethodTable()) | GC_MARKED)); } BOOL IsMarked() const { return !!(((size_t)RawGetMethodTable()) & GC_MARKED); } void SetPinned() { assert (!(gc_heap::settings.concurrent)); GetHeader()->SetGCBit(); } BOOL IsPinned() const { return !!((((CObjectHeader*)this)->GetHeader()->GetBits()) & BIT_SBLK_GC_RESERVE); } // Now we set more bits should actually only clear the mark bit void ClearMarked() { #ifdef DOUBLY_LINKED_FL RawSetMethodTable ((MethodTable *)(((size_t) RawGetMethodTable()) & (~GC_MARKED))); #else RawSetMethodTable (GetMethodTable()); #endif //DOUBLY_LINKED_FL } #ifdef DOUBLY_LINKED_FL void SetBGCMarkBit() { RawSetMethodTable((MethodTable *) (((size_t) RawGetMethodTable()) | BGC_MARKED_BY_FGC)); } BOOL IsBGCMarkBitSet() const { return !!(((size_t)RawGetMethodTable()) & BGC_MARKED_BY_FGC); } void ClearBGCMarkBit() { RawSetMethodTable((MethodTable *)(((size_t) RawGetMethodTable()) & (~BGC_MARKED_BY_FGC))); } void SetFreeObjInCompactBit() { RawSetMethodTable((MethodTable *) (((size_t) RawGetMethodTable()) | MAKE_FREE_OBJ_IN_COMPACT)); } BOOL IsFreeObjInCompactBitSet() const { return !!(((size_t)RawGetMethodTable()) & MAKE_FREE_OBJ_IN_COMPACT); } void ClearFreeObjInCompactBit() { #ifdef _DEBUG // check this looks like an object, but do NOT validate pointers to other objects // as these may not be valid yet - we are calling this during compact_phase Validate(FALSE); #endif //_DEBUG RawSetMethodTable((MethodTable *)(((size_t) RawGetMethodTable()) & (~MAKE_FREE_OBJ_IN_COMPACT))); } #endif //DOUBLY_LINKED_FL size_t ClearSpecialBits() { size_t special_bits = ((size_t)RawGetMethodTable()) & SPECIAL_HEADER_BITS; if (special_bits != 0) { assert ((special_bits & (~ALLOWED_SPECIAL_HEADER_BITS)) == 0); RawSetMethodTable ((MethodTable*)(((size_t)RawGetMethodTable()) & ~(SPECIAL_HEADER_BITS))); } return special_bits; } void SetSpecialBits (size_t special_bits) { assert ((special_bits & 
(~ALLOWED_SPECIAL_HEADER_BITS)) == 0); if (special_bits != 0) { RawSetMethodTable ((MethodTable*)(((size_t)RawGetMethodTable()) | special_bits)); } } CGCDesc *GetSlotMap () { assert (GetMethodTable()->ContainsPointers()); return CGCDesc::GetCGCDescFromMT(GetMethodTable()); } void SetFree(size_t size) { assert (size >= free_object_base_size); assert (g_gc_pFreeObjectMethodTable->GetBaseSize() == free_object_base_size); assert (g_gc_pFreeObjectMethodTable->RawGetComponentSize() == 1); RawSetMethodTable( g_gc_pFreeObjectMethodTable ); size_t* numComponentsPtr = (size_t*) &((uint8_t*) this)[ArrayBase::GetOffsetOfNumComponents()]; *numComponentsPtr = size - free_object_base_size; #ifdef VERIFY_HEAP //This introduces a bug in the free list management. //((void**) this)[-1] = 0; // clear the sync block, assert (*numComponentsPtr >= 0); if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) { memset (((uint8_t*)this)+sizeof(ArrayBase), 0xcc, *numComponentsPtr); #ifdef DOUBLY_LINKED_FL // However, in this case we can't leave the Next field uncleared because no one will clear it // so it remains 0xcc and that's not good for verification if (*numComponentsPtr > 0) { free_list_slot (this) = 0; } #endif //DOUBLY_LINKED_FL } #endif //VERIFY_HEAP #ifdef DOUBLY_LINKED_FL // For background GC, we need to distinguish between a free object that's not on the free list // and one that is. So we always set its prev to PREV_EMPTY to indicate that it's a free // object that's not on the free list. If it should be on the free list, it will be set to the // appropriate non zero value. check_and_clear_in_free_list ((uint8_t*)this, size); #endif //DOUBLY_LINKED_FL } void UnsetFree() { size_t size = free_object_base_size - plug_skew; // since we only need to clear 2 ptr size, we do it manually PTR_PTR m = (PTR_PTR) this; for (size_t i = 0; i < size / sizeof(PTR_PTR); i++) *(m++) = 0; } BOOL IsFree () const { return (GetMethodTable() == g_gc_pFreeObjectMethodTable); } #ifdef FEATURE_STRUCTALIGN int GetRequiredAlignment () const { return GetMethodTable()->GetRequiredAlignment(); } #endif // FEATURE_STRUCTALIGN BOOL ContainsPointers() const { return GetMethodTable()->ContainsPointers(); } #ifdef COLLECTIBLE_CLASS BOOL Collectible() const { return GetMethodTable()->Collectible(); } FORCEINLINE BOOL ContainsPointersOrCollectible() const { MethodTable *pMethodTable = GetMethodTable(); return (pMethodTable->ContainsPointers() || pMethodTable->Collectible()); } #endif //COLLECTIBLE_CLASS Object* GetObjectBase() const { return (Object*) this; } }; #define header(i) ((CObjectHeader*)(i)) #ifdef DOUBLY_LINKED_FL inline BOOL is_on_free_list (uint8_t* o, size_t size) { if (size >= min_free_list) { if (header(o)->GetMethodTable() == g_gc_pFreeObjectMethodTable) { return (free_list_prev (o) != PREV_EMPTY); } } return FALSE; } inline void set_plug_bgc_mark_bit (uint8_t* node) { header(node)->SetBGCMarkBit(); } inline BOOL is_plug_bgc_mark_bit_set (uint8_t* node) { return header(node)->IsBGCMarkBitSet(); } inline void clear_plug_bgc_mark_bit (uint8_t* node) { header(node)->ClearBGCMarkBit(); } inline void set_free_obj_in_compact_bit (uint8_t* node) { header(node)->SetFreeObjInCompactBit(); } inline BOOL is_free_obj_in_compact_bit_set (uint8_t* node) { return header(node)->IsFreeObjInCompactBitSet(); } inline void clear_free_obj_in_compact_bit (uint8_t* node) { header(node)->ClearFreeObjInCompactBit(); } #endif //DOUBLY_LINKED_FL #ifdef SHORT_PLUGS inline void set_plug_padded (uint8_t* node) { header(node)->SetMarked(); } inline 
void clear_plug_padded (uint8_t* node) { header(node)->ClearMarked(); } inline BOOL is_plug_padded (uint8_t* node) { return header(node)->IsMarked(); } #else //SHORT_PLUGS inline void set_plug_padded (uint8_t* node){} inline void clear_plug_padded (uint8_t* node){} inline BOOL is_plug_padded (uint8_t* node){return FALSE;} #endif //SHORT_PLUGS inline size_t clear_special_bits (uint8_t* node) { return header(node)->ClearSpecialBits(); } inline void set_special_bits (uint8_t* node, size_t special_bits) { header(node)->SetSpecialBits (special_bits); } inline size_t unused_array_size(uint8_t * p) { assert(((CObjectHeader*)p)->IsFree()); size_t* numComponentsPtr = (size_t*)(p + ArrayBase::GetOffsetOfNumComponents()); return free_object_base_size + *numComponentsPtr; } inline heap_segment* heap_segment_non_sip (heap_segment* ns) { #ifdef USE_REGIONS if ((ns == 0) || !heap_segment_swept_in_plan (ns)) { return ns; } else { do { if (heap_segment_swept_in_plan (ns)) { dprintf (REGIONS_LOG, ("region %Ix->%Ix SIP", heap_segment_mem (ns), heap_segment_allocated (ns))); } ns = heap_segment_next (ns); } while ((ns != 0) && heap_segment_swept_in_plan (ns)); return ns; } #else //USE_REGIONS return ns; #endif //USE_REGIONS } inline heap_segment* heap_segment_next_non_sip (heap_segment* seg) { heap_segment* ns = heap_segment_next (seg); #ifdef USE_REGIONS return heap_segment_non_sip (ns); #else return ns; #endif //USE_REGIONS } heap_segment* heap_segment_rw (heap_segment* ns) { if ((ns == 0) || !heap_segment_read_only_p (ns)) { return ns; } else { do { ns = heap_segment_next (ns); } while ((ns != 0) && heap_segment_read_only_p (ns)); return ns; } } //returns the next non ro segment. heap_segment* heap_segment_next_rw (heap_segment* seg) { heap_segment* ns = heap_segment_next (seg); return heap_segment_rw (ns); } // returns the segment before seg. heap_segment* heap_segment_prev_rw (heap_segment* begin, heap_segment* seg) { assert (begin != 0); heap_segment* prev = begin; heap_segment* current = heap_segment_next_rw (begin); while (current && current != seg) { prev = current; current = heap_segment_next_rw (current); } if (current == seg) { return prev; } else { return 0; } } // returns the segment before seg. 
heap_segment* heap_segment_prev (heap_segment* begin, heap_segment* seg) { assert (begin != 0); heap_segment* prev = begin; heap_segment* current = heap_segment_next (begin); while (current && current != seg) { prev = current; current = heap_segment_next (current); } if (current == seg) { return prev; } else { return 0; } } heap_segment* heap_segment_in_range (heap_segment* ns) { if ((ns == 0) || heap_segment_in_range_p (ns)) { return ns; } else { do { ns = heap_segment_next (ns); } while ((ns != 0) && !heap_segment_in_range_p (ns)); return ns; } } heap_segment* heap_segment_next_in_range (heap_segment* seg) { heap_segment* ns = heap_segment_next (seg); return heap_segment_in_range (ns); } struct imemory_data { uint8_t* memory_base; }; struct numa_reserved_block { uint8_t* memory_base; size_t block_size; numa_reserved_block() : memory_base(nullptr), block_size(0) { } }; struct initial_memory_details { imemory_data *initial_memory; imemory_data *initial_normal_heap; // points into initial_memory_array imemory_data *initial_large_heap; // points into initial_memory_array imemory_data *initial_pinned_heap; // points into initial_memory_array size_t block_size_normal; size_t block_size_large; size_t block_size_pinned; int block_count; // # of blocks in each int current_block_normal; int current_block_large; int current_block_pinned; enum { ALLATONCE = 1, EACH_GENERATION, EACH_BLOCK, ALLATONCE_SEPARATED_POH, EACH_NUMA_NODE }; size_t allocation_pattern; size_t block_size(int i) { switch (i / block_count) { case 0: return block_size_normal; case 1: return block_size_large; case 2: return block_size_pinned; default: __UNREACHABLE(); } }; void* get_initial_memory (int gen, int h_number) { switch (gen) { case soh_gen0: case soh_gen1: case soh_gen2: return initial_normal_heap[h_number].memory_base; case loh_generation: return initial_large_heap[h_number].memory_base; case poh_generation: return initial_pinned_heap[h_number].memory_base; default: __UNREACHABLE(); } }; size_t get_initial_size (int gen) { switch (gen) { case soh_gen0: case soh_gen1: case soh_gen2: return block_size_normal; case loh_generation: return block_size_large; case poh_generation: return block_size_pinned; default: __UNREACHABLE(); } }; int numa_reserved_block_count; numa_reserved_block* numa_reserved_block_table; }; initial_memory_details memory_details; BOOL gc_heap::reserve_initial_memory (size_t normal_size, size_t large_size, size_t pinned_size, int num_heaps, bool use_large_pages_p, bool separated_poh_p, uint16_t* heap_no_to_numa_node) { BOOL reserve_success = FALSE; // should only be called once assert (memory_details.initial_memory == 0); // soh + loh + poh segments * num_heaps memory_details.initial_memory = new (nothrow) imemory_data[num_heaps * (total_generation_count - ephemeral_generation_count)]; if (memory_details.initial_memory == 0) { dprintf (2, ("failed to reserve %Id bytes for imemory_data", num_heaps * (total_generation_count - ephemeral_generation_count) * sizeof (imemory_data))); return FALSE; } memory_details.initial_normal_heap = memory_details.initial_memory; memory_details.initial_large_heap = memory_details.initial_normal_heap + num_heaps; memory_details.initial_pinned_heap = memory_details.initial_large_heap + num_heaps; memory_details.block_size_normal = normal_size; memory_details.block_size_large = large_size; memory_details.block_size_pinned = pinned_size; memory_details.block_count = num_heaps; memory_details.current_block_normal = 0; memory_details.current_block_large = 0; 
memory_details.current_block_pinned = 0; g_gc_lowest_address = MAX_PTR; g_gc_highest_address = 0; if (((size_t)MAX_PTR - large_size) < normal_size) { // we are already overflowing with just one heap. dprintf (2, ("0x%Ix + 0x%Ix already overflow", normal_size, large_size)); return FALSE; } if (((size_t)MAX_PTR / memory_details.block_count) < (normal_size + large_size + pinned_size)) { dprintf (2, ("(0x%Ix + 0x%Ix)*0x%Ix overflow", normal_size, large_size, memory_details.block_count)); return FALSE; } // figure out number of NUMA nodes and allocate additional table for NUMA local reservation memory_details.numa_reserved_block_count = 0; memory_details.numa_reserved_block_table = nullptr; int numa_node_count = 0; if (heap_no_to_numa_node != nullptr) { uint16_t highest_numa_node = 0; // figure out the highest NUMA node for (int heap_no = 0; heap_no < num_heaps; heap_no++) { uint16_t heap_numa_node = heap_no_to_numa_node[heap_no]; highest_numa_node = max (highest_numa_node, heap_numa_node); } assert (highest_numa_node < MAX_SUPPORTED_CPUS); numa_node_count = highest_numa_node + 1; memory_details.numa_reserved_block_count = numa_node_count * (1 + separated_poh_p); memory_details.numa_reserved_block_table = new (nothrow) numa_reserved_block[memory_details.numa_reserved_block_count]; if (memory_details.numa_reserved_block_table == nullptr) { // we couldn't get the memory - continue as if doing the non-NUMA case dprintf(2, ("failed to reserve %Id bytes for numa_reserved_block data", memory_details.numa_reserved_block_count * sizeof(numa_reserved_block))); memory_details.numa_reserved_block_count = 0; } } if (memory_details.numa_reserved_block_table != nullptr) { // figure out how much to reserve on each NUMA node // note this can be very different between NUMA nodes, depending on // which processors our heaps are associated with size_t merged_pinned_size = separated_poh_p ? 
0 : pinned_size; for (int heap_no = 0; heap_no < num_heaps; heap_no++) { uint16_t heap_numa_node = heap_no_to_numa_node[heap_no]; numa_reserved_block * block = &memory_details.numa_reserved_block_table[heap_numa_node]; // add the size required for this heap block->block_size += normal_size + large_size + merged_pinned_size; if (separated_poh_p) { numa_reserved_block* pinned_block = &memory_details.numa_reserved_block_table[numa_node_count + heap_numa_node]; // add the pinned size required for this heap pinned_block->block_size += pinned_size; } } // reserve the appropriate size on each NUMA node bool failure = false; for (int block_index = 0; block_index < memory_details.numa_reserved_block_count; block_index++) { numa_reserved_block * block = &memory_details.numa_reserved_block_table[block_index]; if (block->block_size == 0) continue; int numa_node = block_index % numa_node_count; bool pinned_block = block_index >= numa_node_count; block->memory_base = (uint8_t*)virtual_alloc (block->block_size, use_large_pages_p && !pinned_block, numa_node); if (block->memory_base == nullptr) { dprintf(2, ("failed to reserve %Id bytes for on NUMA node %u", block->block_size, numa_node)); failure = true; break; } else { g_gc_lowest_address = min(g_gc_lowest_address, block->memory_base); g_gc_highest_address = max(g_gc_highest_address, block->memory_base + block->block_size); } } if (failure) { // if we had any failures, undo the work done so far // we will instead use one of the other allocation patterns // we could try to use what we did succeed to reserve, but that gets complicated for (int block_index = 0; block_index < memory_details.numa_reserved_block_count; block_index++) { numa_reserved_block * block = &memory_details.numa_reserved_block_table[block_index]; if (block->memory_base != nullptr) { virtual_free(block->memory_base, block->block_size); block->memory_base = nullptr; } } delete [] memory_details.numa_reserved_block_table; memory_details.numa_reserved_block_table = nullptr; memory_details.numa_reserved_block_count = 0; } else { // for each NUMA node, give out the memory to its heaps for (uint16_t numa_node = 0; numa_node < numa_node_count; numa_node++) { numa_reserved_block * block = &memory_details.numa_reserved_block_table[numa_node]; numa_reserved_block* pinned_block = separated_poh_p ? &memory_details.numa_reserved_block_table[numa_node_count + numa_node] : nullptr; // if the block's size is 0, there can be no heaps on this NUMA node if (block->block_size == 0) { assert((pinned_block == nullptr) || (pinned_block->block_size == 0)); continue; } uint8_t* memory_base = block->memory_base; uint8_t* pinned_memory_base = ((pinned_block == nullptr) ? 
nullptr : pinned_block->memory_base); for (int heap_no = 0; heap_no < num_heaps; heap_no++) { uint16_t heap_numa_node = heap_no_to_numa_node[heap_no]; if (heap_numa_node != numa_node) { // this heap is on another NUMA node continue; } memory_details.initial_normal_heap[heap_no].memory_base = memory_base; memory_base += normal_size; memory_details.initial_large_heap[heap_no].memory_base = memory_base; memory_base += large_size; if (separated_poh_p) { memory_details.initial_pinned_heap[heap_no].memory_base = pinned_memory_base; pinned_memory_base += pinned_size; } else { memory_details.initial_pinned_heap[heap_no].memory_base = memory_base; memory_base += pinned_size; } } // sanity check - we should be at the end of the memory block for this NUMA node assert (memory_base == block->memory_base + block->block_size); assert ((pinned_block == nullptr) || (pinned_memory_base == pinned_block->memory_base + pinned_block->block_size)); } memory_details.allocation_pattern = initial_memory_details::EACH_NUMA_NODE; reserve_success = TRUE; } } if (!reserve_success) { size_t temp_pinned_size = (separated_poh_p ? 0 : pinned_size); size_t separate_pinned_size = memory_details.block_count * pinned_size; size_t requestedMemory = memory_details.block_count * (normal_size + large_size + temp_pinned_size); uint8_t* allatonce_block = (uint8_t*)virtual_alloc(requestedMemory, use_large_pages_p); uint8_t* separated_poh_block = nullptr; if (allatonce_block && separated_poh_p) { separated_poh_block = (uint8_t*)virtual_alloc(separate_pinned_size, false); if (!separated_poh_block) { virtual_free(allatonce_block, requestedMemory); allatonce_block = nullptr; } } if (allatonce_block) { if (separated_poh_p) { g_gc_lowest_address = min(allatonce_block, separated_poh_block); g_gc_highest_address = max((allatonce_block + requestedMemory), (separated_poh_block + separate_pinned_size)); memory_details.allocation_pattern = initial_memory_details::ALLATONCE_SEPARATED_POH; } else { g_gc_lowest_address = allatonce_block; g_gc_highest_address = allatonce_block + requestedMemory; memory_details.allocation_pattern = initial_memory_details::ALLATONCE; } for (int i = 0; i < memory_details.block_count; i++) { memory_details.initial_normal_heap[i].memory_base = allatonce_block + (i * normal_size); memory_details.initial_large_heap[i].memory_base = allatonce_block + (memory_details.block_count * normal_size) + (i * large_size); if (separated_poh_p) { memory_details.initial_pinned_heap[i].memory_base = separated_poh_block + (i * pinned_size); } else { memory_details.initial_pinned_heap[i].memory_base = allatonce_block + (memory_details.block_count * (normal_size + large_size)) + (i * pinned_size); } } reserve_success = TRUE; } else { // try to allocate 3 blocks uint8_t* b1 = (uint8_t*)virtual_alloc(memory_details.block_count * normal_size, use_large_pages_p); uint8_t* b2 = (uint8_t*)virtual_alloc(memory_details.block_count * large_size, use_large_pages_p); uint8_t* b3 = (uint8_t*)virtual_alloc(memory_details.block_count * pinned_size, use_large_pages_p && !separated_poh_p); if (b1 && b2 && b3) { memory_details.allocation_pattern = initial_memory_details::EACH_GENERATION; g_gc_lowest_address = min(b1, min(b2, b3)); g_gc_highest_address = max(b1 + memory_details.block_count * normal_size, max(b2 + memory_details.block_count * large_size, b3 + memory_details.block_count * pinned_size)); for (int i = 0; i < memory_details.block_count; i++) { memory_details.initial_normal_heap[i].memory_base = b1 + (i * normal_size); 
memory_details.initial_large_heap[i].memory_base = b2 + (i * large_size); memory_details.initial_pinned_heap[i].memory_base = b3 + (i * pinned_size); } reserve_success = TRUE; } else { // allocation failed, we'll go on to try allocating each block. // We could preserve the b1 alloc, but code complexity increases if (b1) virtual_free(b1, memory_details.block_count * normal_size); if (b2) virtual_free(b2, memory_details.block_count * large_size); if (b3) virtual_free(b3, memory_details.block_count * pinned_size); } if ((b2 == NULL) && (memory_details.block_count > 1)) { memory_details.allocation_pattern = initial_memory_details::EACH_BLOCK; imemory_data* current_block = memory_details.initial_memory; for (int i = 0; i < (memory_details.block_count * (total_generation_count - ephemeral_generation_count)); i++, current_block++) { size_t block_size = memory_details.block_size(i); uint16_t numa_node = NUMA_NODE_UNDEFINED; if (heap_no_to_numa_node != nullptr) { int heap_no = i % memory_details.block_count; numa_node = heap_no_to_numa_node[heap_no]; } current_block->memory_base = (uint8_t*)virtual_alloc(block_size, use_large_pages_p, numa_node); if (current_block->memory_base == 0) { // Free the blocks that we've allocated so far current_block = memory_details.initial_memory; for (int j = 0; j < i; j++, current_block++) { if (current_block->memory_base != 0) { block_size = memory_details.block_size(i); virtual_free(current_block->memory_base, block_size); } } reserve_success = FALSE; break; } else { if (current_block->memory_base < g_gc_lowest_address) g_gc_lowest_address = current_block->memory_base; if (((uint8_t*)current_block->memory_base + block_size) > g_gc_highest_address) g_gc_highest_address = (current_block->memory_base + block_size); } reserve_success = TRUE; } } } } if (reserve_success && separated_poh_p) { for (int heap_no = 0; (reserve_success && (heap_no < num_heaps)); heap_no++) { if (!GCToOSInterface::VirtualCommit(memory_details.initial_pinned_heap[heap_no].memory_base, pinned_size)) { reserve_success = FALSE; } } } return reserve_success; } void gc_heap::destroy_initial_memory() { if (memory_details.initial_memory != NULL) { switch (memory_details.allocation_pattern) { case initial_memory_details::ALLATONCE: virtual_free (memory_details.initial_memory[0].memory_base, memory_details.block_count*(memory_details.block_size_normal + memory_details.block_size_large + memory_details.block_size_pinned)); break; case initial_memory_details::ALLATONCE_SEPARATED_POH: virtual_free(memory_details.initial_memory[0].memory_base, memory_details.block_count * (memory_details.block_size_normal + memory_details.block_size_large)); virtual_free(memory_details.initial_pinned_heap[0].memory_base, memory_details.block_count * (memory_details.block_size_pinned)); break; case initial_memory_details::EACH_GENERATION: virtual_free (memory_details.initial_normal_heap[0].memory_base, memory_details.block_count*memory_details.block_size_normal); virtual_free (memory_details.initial_large_heap[0].memory_base, memory_details.block_count*memory_details.block_size_large); virtual_free (memory_details.initial_pinned_heap[0].memory_base, memory_details.block_count*memory_details.block_size_pinned); break; case initial_memory_details::EACH_BLOCK: { imemory_data* current_block = memory_details.initial_memory; int total_block_count = memory_details.block_count * (total_generation_count - ephemeral_generation_count); for (int i = 0; i < total_block_count; i++, current_block++) { size_t block_size = 
            memory_details.block_size (i);
                if (current_block->memory_base != NULL)
                {
                    virtual_free (current_block->memory_base, block_size);
                }
            }
            break;
        }
        case initial_memory_details::EACH_NUMA_NODE:
            for (int block_index = 0; block_index < memory_details.numa_reserved_block_count; block_index++)
            {
                numa_reserved_block * block = &memory_details.numa_reserved_block_table[block_index];
                if (block->memory_base != nullptr)
                {
                    virtual_free (block->memory_base, block->block_size);
                }
            }
            delete [] memory_details.numa_reserved_block_table;
            break;
        default:
            assert (!"unexpected allocation_pattern");
            break;
        }

        delete [] memory_details.initial_memory;
        memory_details.initial_memory = NULL;
        memory_details.initial_normal_heap = NULL;
        memory_details.initial_large_heap = NULL;
        memory_details.initial_pinned_heap = NULL;
    }
}

heap_segment* make_initial_segment (int gen, int h_number, gc_heap* hp)
{
    void* mem = memory_details.get_initial_memory (gen, h_number);
    size_t size = memory_details.get_initial_size (gen);
    heap_segment* res = gc_heap::make_heap_segment ((uint8_t*)mem, size, hp, gen);

    return res;
}

void* virtual_alloc (size_t size)
{
    return virtual_alloc(size, false);
}

void* virtual_alloc (size_t size, bool use_large_pages_p, uint16_t numa_node)
{
    size_t requested_size = size;

    if ((gc_heap::reserved_memory_limit - gc_heap::reserved_memory) < requested_size)
    {
        gc_heap::reserved_memory_limit =
            GCScan::AskForMoreReservedMemory (gc_heap::reserved_memory_limit, requested_size);
        if ((gc_heap::reserved_memory_limit - gc_heap::reserved_memory) < requested_size)
        {
            return 0;
        }
    }

    uint32_t flags = VirtualReserveFlags::None;
#ifndef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
    if (virtual_alloc_hardware_write_watch)
    {
        flags = VirtualReserveFlags::WriteWatch;
    }
#endif // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP

    void* prgmem = use_large_pages_p ?
        GCToOSInterface::VirtualReserveAndCommitLargePages(requested_size, numa_node) :
        GCToOSInterface::VirtualReserve(requested_size, card_size * card_word_width, flags, numa_node);
    void *aligned_mem = prgmem;

    // We don't want (prgmem + size) to be right at the end of the address space
    // because we'd have to worry about that every time we do (address + size).
    // We also want to make sure that we leave loh_size_threshold at the end so that
    // when we allocate a small object we don't need to worry about overflow there
    // when we do alloc_ptr + size.
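    // Descriptive note (added): the check below discards (releases) a reservation whose end
    // lands within END_SPACE_AFTER_GC bytes of the top of the address space (or wraps to 0),
    // so that later address-plus-size arithmetic inside the segment cannot overflow;
    // virtual_alloc then returns 0 to the caller.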
if (prgmem) { uint8_t* end_mem = (uint8_t*)prgmem + requested_size; if ((end_mem == 0) || ((size_t)(MAX_PTR - end_mem) <= END_SPACE_AFTER_GC)) { GCToOSInterface::VirtualRelease (prgmem, requested_size); dprintf (2, ("Virtual Alloc size %Id returned memory right against 4GB [%Ix, %Ix[ - discarding", requested_size, (size_t)prgmem, (size_t)((uint8_t*)prgmem+requested_size))); prgmem = 0; aligned_mem = 0; } } if (prgmem) { gc_heap::reserved_memory += requested_size; } dprintf (2, ("Virtual Alloc size %Id: [%Ix, %Ix[", requested_size, (size_t)prgmem, (size_t)((uint8_t*)prgmem+requested_size))); return aligned_mem; } static size_t get_valid_segment_size (BOOL large_seg=FALSE) { size_t seg_size, initial_seg_size; if (!large_seg) { initial_seg_size = INITIAL_ALLOC; seg_size = static_cast<size_t>(GCConfig::GetSegmentSize()); } else { initial_seg_size = LHEAP_ALLOC; seg_size = static_cast<size_t>(GCConfig::GetSegmentSize()) / 2; } #ifdef MULTIPLE_HEAPS #ifdef HOST_64BIT if (!large_seg) #endif // HOST_64BIT { if (g_num_processors > 4) initial_seg_size /= 2; if (g_num_processors > 8) initial_seg_size /= 2; } #endif //MULTIPLE_HEAPS // if seg_size is small but not 0 (0 is default if config not set) // then set the segment to the minimum size if (!g_theGCHeap->IsValidSegmentSize(seg_size)) { // if requested size is between 1 byte and 4MB, use min if ((seg_size >> 1) && !(seg_size >> 22)) seg_size = 1024*1024*4; else seg_size = initial_seg_size; } #ifdef HOST_64BIT seg_size = round_up_power2 (seg_size); #else seg_size = round_down_power2 (seg_size); #endif // HOST_64BIT return (seg_size); } #ifndef USE_REGIONS void gc_heap::compute_new_ephemeral_size() { int eph_gen_max = max_generation - 1 - (settings.promotion ? 1 : 0); size_t padding_size = 0; for (int i = 0; i <= eph_gen_max; i++) { dynamic_data* dd = dynamic_data_of (i); total_ephemeral_size += (dd_survived_size (dd) - dd_pinned_survived_size (dd)); #ifdef RESPECT_LARGE_ALIGNMENT total_ephemeral_size += dd_num_npinned_plugs (dd) * switch_alignment_size (FALSE); #endif //RESPECT_LARGE_ALIGNMENT #ifdef FEATURE_STRUCTALIGN total_ephemeral_size += dd_num_npinned_plugs (dd) * MAX_STRUCTALIGN; #endif //FEATURE_STRUCTALIGN #ifdef SHORT_PLUGS padding_size += dd_padding_size (dd); #endif //SHORT_PLUGS } total_ephemeral_size += eph_gen_starts_size; #ifdef RESPECT_LARGE_ALIGNMENT size_t planned_ephemeral_size = heap_segment_plan_allocated (ephemeral_heap_segment) - generation_plan_allocation_start (generation_of (max_generation-1)); total_ephemeral_size = min (total_ephemeral_size, planned_ephemeral_size); #endif //RESPECT_LARGE_ALIGNMENT #ifdef SHORT_PLUGS total_ephemeral_size = Align ((size_t)((double)total_ephemeral_size * short_plugs_pad_ratio) + 1); total_ephemeral_size += Align (DESIRED_PLUG_LENGTH); #endif //SHORT_PLUGS dprintf (3, ("total ephemeral size is %Ix, padding %Ix(%Ix)", total_ephemeral_size, padding_size, (total_ephemeral_size - padding_size))); } #ifdef _MSC_VER #pragma warning(disable:4706) // "assignment within conditional expression" is intentional in this function. #endif // _MSC_VER heap_segment* gc_heap::soh_get_segment_to_expand() { size_t size = soh_segment_size; ordered_plug_indices_init = FALSE; use_bestfit = FALSE; //compute the size of the new ephemeral heap segment. 
compute_new_ephemeral_size(); if ((settings.pause_mode != pause_low_latency) && (settings.pause_mode != pause_no_gc) #ifdef BACKGROUND_GC && (!gc_heap::background_running_p()) #endif //BACKGROUND_GC ) { assert (settings.condemned_generation <= max_generation); allocator* gen_alloc = ((settings.condemned_generation == max_generation) ? nullptr : generation_allocator (generation_of (max_generation))); dprintf (2, ("(gen%d)soh_get_segment_to_expand", settings.condemned_generation)); // try to find one in the gen 2 segment list, search backwards because the first segments // tend to be more compact than the later ones. heap_segment* fseg = heap_segment_rw (generation_start_segment (generation_of (max_generation))); PREFIX_ASSUME(fseg != NULL); #ifdef SEG_REUSE_STATS int try_reuse = 0; #endif //SEG_REUSE_STATS heap_segment* seg = ephemeral_heap_segment; while ((seg = heap_segment_prev_rw (fseg, seg)) && (seg != fseg)) { #ifdef SEG_REUSE_STATS try_reuse++; #endif //SEG_REUSE_STATS if (can_expand_into_p (seg, size/3, total_ephemeral_size, gen_alloc)) { get_gc_data_per_heap()->set_mechanism (gc_heap_expand, (use_bestfit ? expand_reuse_bestfit : expand_reuse_normal)); if (settings.condemned_generation == max_generation) { if (use_bestfit) { build_ordered_free_spaces (seg); dprintf (GTC_LOG, ("can use best fit")); } #ifdef SEG_REUSE_STATS dprintf (SEG_REUSE_LOG_0, ("(gen%d)soh_get_segment_to_expand: found seg #%d to reuse", settings.condemned_generation, try_reuse)); #endif //SEG_REUSE_STATS dprintf (GTC_LOG, ("max_gen: Found existing segment to expand into %Ix", (size_t)seg)); return seg; } else { #ifdef SEG_REUSE_STATS dprintf (SEG_REUSE_LOG_0, ("(gen%d)soh_get_segment_to_expand: found seg #%d to reuse - returning", settings.condemned_generation, try_reuse)); #endif //SEG_REUSE_STATS dprintf (GTC_LOG, ("max_gen-1: Found existing segment to expand into %Ix", (size_t)seg)); // If we return 0 here, the allocator will think since we are short on end // of seg we need to trigger a full compacting GC. So if sustained low latency // is set we should acquire a new seg instead, that way we wouldn't be short. // The real solution, of course, is to actually implement seg reuse in gen1. if (settings.pause_mode != pause_sustained_low_latency) { dprintf (GTC_LOG, ("max_gen-1: SustainedLowLatency is set, acquire a new seg")); get_gc_data_per_heap()->set_mechanism (gc_heap_expand, expand_next_full_gc); return 0; } } } } } heap_segment* result = get_segment (size, gc_oh_num::soh); if(result) { #ifdef BACKGROUND_GC if (current_c_gc_state == c_gc_state_planning) { // When we expand heap during bgc sweep, we set the seg to be swept so // we'll always look at cards for objects on the new segment. result->flags |= heap_segment_flags_swept; } #endif //BACKGROUND_GC FIRE_EVENT(GCCreateSegment_V1, heap_segment_mem(result), (size_t)(heap_segment_reserved (result) - heap_segment_mem(result)), gc_etw_segment_small_object_heap); } get_gc_data_per_heap()->set_mechanism (gc_heap_expand, (result ? 
expand_new_seg : expand_no_memory)); if (result == 0) { dprintf (2, ("h%d: failed to allocate a new segment!", heap_number)); } else { #ifdef MULTIPLE_HEAPS heap_segment_heap (result) = this; #endif //MULTIPLE_HEAPS } dprintf (GTC_LOG, ("(gen%d)creating new segment %Ix", settings.condemned_generation, result)); return result; } #endif //!USE_REGIONS #ifdef _MSC_VER #pragma warning(default:4706) #endif // _MSC_VER //returns 0 in case of allocation failure heap_segment* gc_heap::get_segment (size_t size, gc_oh_num oh) { assert(oh != gc_oh_num::none); BOOL uoh_p = (oh == gc_oh_num::loh) || (oh == gc_oh_num::poh); if (heap_hard_limit) return NULL; heap_segment* result = 0; if (segment_standby_list != 0) { result = segment_standby_list; heap_segment* last = 0; while (result) { size_t hs = (size_t)(heap_segment_reserved (result) - (uint8_t*)result); if ((hs >= size) && ((hs / 2) < size)) { dprintf (2, ("Hoarded segment %Ix found", (size_t) result)); if (last) { heap_segment_next (last) = heap_segment_next (result); } else { segment_standby_list = heap_segment_next (result); } break; } else { last = result; result = heap_segment_next (result); } } } if (result) { init_heap_segment (result, __this #ifdef USE_REGIONS , 0, size, (uoh_p ? max_generation : 0) #endif //USE_REGIONS ); #ifdef BACKGROUND_GC if (is_bgc_in_progress()) { dprintf (GC_TABLE_LOG, ("hoarded seg %Ix, mark_array is %Ix", result, mark_array)); if (!commit_mark_array_new_seg (__this, result)) { dprintf (GC_TABLE_LOG, ("failed to commit mark array for hoarded seg")); // If we can't use it we need to thread it back. if (segment_standby_list != 0) { heap_segment_next (result) = segment_standby_list; segment_standby_list = result; } else { segment_standby_list = result; } result = 0; } } #endif //BACKGROUND_GC if (result) seg_mapping_table_add_segment (result, __this); } if (!result) { void* mem = virtual_alloc (size); if (!mem) { fgm_result.set_fgm (fgm_reserve_segment, size, uoh_p); return 0; } result = make_heap_segment ((uint8_t*)mem, size, __this, (uoh_p ? 
max_generation : 0)); if (result) { uint8_t* start; uint8_t* end; if (mem < g_gc_lowest_address) { start = (uint8_t*)mem; } else { start = (uint8_t*)g_gc_lowest_address; } if (((uint8_t*)mem + size) > g_gc_highest_address) { end = (uint8_t*)mem + size; } else { end = (uint8_t*)g_gc_highest_address; } if (gc_heap::grow_brick_card_tables (start, end, size, result, __this, uoh_p) != 0) { virtual_free (mem, size); return 0; } } else { fgm_result.set_fgm (fgm_commit_segment_beg, SEGMENT_INITIAL_COMMIT, uoh_p); virtual_free (mem, size); } if (result) { seg_mapping_table_add_segment (result, __this); } } #ifdef BACKGROUND_GC if (result) { ::record_changed_seg ((uint8_t*)result, heap_segment_reserved (result), settings.gc_index, current_bgc_state, seg_added); bgc_verify_mark_array_cleared (result); } #endif //BACKGROUND_GC dprintf (GC_TABLE_LOG, ("h%d: new seg: %Ix-%Ix (%Id)", heap_number, result, ((uint8_t*)result + size), size)); return result; } void gc_heap::release_segment (heap_segment* sg) { ptrdiff_t delta = 0; FIRE_EVENT(GCFreeSegment_V1, heap_segment_mem(sg)); virtual_free (sg, (uint8_t*)heap_segment_reserved (sg)-(uint8_t*)sg, sg); } heap_segment* gc_heap::get_segment_for_uoh (int gen_number, size_t size #ifdef MULTIPLE_HEAPS , gc_heap* hp #endif //MULTIPLE_HEAPS ) { #ifndef MULTIPLE_HEAPS gc_heap* hp = 0; #endif //MULTIPLE_HEAPS #ifdef USE_REGIONS heap_segment* res = hp->get_new_region (gen_number, size); #else //USE_REGIONS gc_oh_num oh = gen_to_oh (gen_number); heap_segment* res = hp->get_segment (size, oh); #endif //USE_REGIONS if (res != 0) { #ifdef MULTIPLE_HEAPS heap_segment_heap (res) = hp; #endif //MULTIPLE_HEAPS size_t flags = (gen_number == poh_generation) ? heap_segment_flags_poh : heap_segment_flags_loh; #ifdef USE_REGIONS // in the regions case, flags are set by get_new_region assert ((res->flags & (heap_segment_flags_loh | heap_segment_flags_poh)) == flags); #else //USE_REGIONS res->flags |= flags; #endif //USE_REGIONS FIRE_EVENT(GCCreateSegment_V1, heap_segment_mem(res), (size_t)(heap_segment_reserved (res) - heap_segment_mem(res)), (gen_number == poh_generation) ? gc_etw_segment_pinned_object_heap : gc_etw_segment_large_object_heap); #ifndef USE_REGIONS #ifdef MULTIPLE_HEAPS hp->thread_uoh_segment (gen_number, res); #else thread_uoh_segment (gen_number, res); #endif //MULTIPLE_HEAPS #endif //!USE_REGIONS GCToEEInterface::DiagAddNewRegion( gen_number, heap_segment_mem (res), heap_segment_allocated (res), heap_segment_reserved (res) ); } return res; } void gc_heap::thread_uoh_segment (int gen_number, heap_segment* new_seg) { heap_segment* seg = generation_allocation_segment (generation_of (gen_number)); while (heap_segment_next_rw (seg)) seg = heap_segment_next_rw (seg); heap_segment_next (seg) = new_seg; } heap_segment* gc_heap::get_uoh_segment (int gen_number, size_t size, BOOL* did_full_compact_gc) { *did_full_compact_gc = FALSE; size_t last_full_compact_gc_count = get_full_compact_gc_count(); //access to get_segment needs to be serialized add_saved_spinlock_info (true, me_release, mt_get_large_seg); leave_spin_lock (&more_space_lock_uoh); enter_spin_lock (&gc_heap::gc_lock); dprintf (SPINLOCK_LOG, ("[%d]Seg: Egc", heap_number)); // if a GC happened between here and before we ask for a segment in // get_uoh_segment, we need to count that GC. 
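    // Callers of get_uoh_segment typically consult *did_full_compact_gc to retry the
    // allocation before requesting yet another full compacting GC; the count is re-read
    // below, after gc_lock is held, so a GC that ran while more_space_lock_uoh was
    // released is not missed.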
size_t current_full_compact_gc_count = get_full_compact_gc_count(); if (current_full_compact_gc_count > last_full_compact_gc_count) { *did_full_compact_gc = TRUE; } heap_segment* res = get_segment_for_uoh (gen_number, size #ifdef MULTIPLE_HEAPS , this #endif //MULTIPLE_HEAPS ); dprintf (SPINLOCK_LOG, ("[%d]Seg: A Lgc", heap_number)); leave_spin_lock (&gc_heap::gc_lock); enter_spin_lock (&more_space_lock_uoh); add_saved_spinlock_info (true, me_acquire, mt_get_large_seg); return res; } #ifdef MULTIPLE_HEAPS #ifdef HOST_X86 #ifdef _MSC_VER #pragma warning(disable:4035) static ptrdiff_t get_cycle_count() { __asm rdtsc } #pragma warning(default:4035) #elif defined(__GNUC__) static ptrdiff_t get_cycle_count() { ptrdiff_t cycles; ptrdiff_t cyclesHi; __asm__ __volatile__ ("rdtsc":"=a" (cycles), "=d" (cyclesHi)); return cycles; } #else //_MSC_VER #error Unknown compiler #endif //_MSC_VER #elif defined(TARGET_AMD64) #ifdef _MSC_VER extern "C" uint64_t __rdtsc(); #pragma intrinsic(__rdtsc) static ptrdiff_t get_cycle_count() { return (ptrdiff_t)__rdtsc(); } #elif defined(__GNUC__) static ptrdiff_t get_cycle_count() { ptrdiff_t cycles; ptrdiff_t cyclesHi; __asm__ __volatile__ ("rdtsc":"=a" (cycles), "=d" (cyclesHi)); return (cyclesHi << 32) | cycles; } #else // _MSC_VER extern "C" ptrdiff_t get_cycle_count(void); #endif // _MSC_VER #elif defined(TARGET_LOONGARCH64) static ptrdiff_t get_cycle_count() { ////FIXME: TODO for LOONGARCH64: //ptrdiff_t cycle; __asm__ volatile ("break \n"); return 0; } #else static ptrdiff_t get_cycle_count() { // @ARMTODO, @ARM64TODO, @WASMTODO: cycle counter is not exposed to user mode. For now (until we can show this // makes a difference on the configurations on which we'll run) just return 0. This will result in // all buffer access times being reported as equal in access_time(). return 0; } #endif //TARGET_X86 // We may not be on contiguous numa nodes so need to store // the node index as well. struct node_heap_count { int node_no; int heap_count; }; class heap_select { heap_select() {} public: static uint8_t* sniff_buffer; static unsigned n_sniff_buffers; static unsigned cur_sniff_index; static uint16_t proc_no_to_heap_no[MAX_SUPPORTED_CPUS]; static uint16_t heap_no_to_proc_no[MAX_SUPPORTED_CPUS]; static uint16_t heap_no_to_numa_node[MAX_SUPPORTED_CPUS]; static uint16_t proc_no_to_numa_node[MAX_SUPPORTED_CPUS]; static uint16_t numa_node_to_heap_map[MAX_SUPPORTED_CPUS+4]; // Note this is the total numa nodes GC heaps are on. There might be // more on the machine if GC threads aren't using all of them. 
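    // total_numa_nodes and heaps_on_node below are filled in by init_numa_node_to_heap_map.
    // When GetCurrentProcessorNumber is unavailable, heap selection falls back to the sniff
    // buffer: access_time() times a read of one of the heap's dedicated cache lines in
    // sniff_buffer and select_heap() picks the heap whose line was cheapest to reach, on
    // the assumption that a cheap read means the current CPU is close to that heap.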
static uint16_t total_numa_nodes; static node_heap_count heaps_on_node[MAX_SUPPORTED_NODES]; static int access_time(uint8_t *sniff_buffer, int heap_number, unsigned sniff_index, unsigned n_sniff_buffers) { ptrdiff_t start_cycles = get_cycle_count(); uint8_t sniff = sniff_buffer[(1 + heap_number*n_sniff_buffers + sniff_index)*HS_CACHE_LINE_SIZE]; assert (sniff == 0); ptrdiff_t elapsed_cycles = get_cycle_count() - start_cycles; // add sniff here just to defeat the optimizer elapsed_cycles += sniff; return (int) elapsed_cycles; } public: static BOOL init(int n_heaps) { assert (sniff_buffer == NULL && n_sniff_buffers == 0); if (!GCToOSInterface::CanGetCurrentProcessorNumber()) { n_sniff_buffers = n_heaps*2+1; size_t n_cache_lines = 1 + n_heaps * n_sniff_buffers + 1; size_t sniff_buf_size = n_cache_lines * HS_CACHE_LINE_SIZE; if (sniff_buf_size / HS_CACHE_LINE_SIZE != n_cache_lines) // check for overlow { return FALSE; } sniff_buffer = new (nothrow) uint8_t[sniff_buf_size]; if (sniff_buffer == 0) return FALSE; memset(sniff_buffer, 0, sniff_buf_size*sizeof(uint8_t)); } bool do_numa = GCToOSInterface::CanEnableGCNumaAware(); // we want to assign heap indices such that there is a contiguous // range of heap numbers for each numa node // we do this in two passes: // 1. gather processor numbers and numa node numbers for all heaps // 2. assign heap numbers for each numa node // Pass 1: gather processor numbers and numa node numbers uint16_t proc_no[MAX_SUPPORTED_CPUS]; uint16_t node_no[MAX_SUPPORTED_CPUS]; uint16_t max_node_no = 0; for (int i = 0; i < n_heaps; i++) { if (!GCToOSInterface::GetProcessorForHeap (i, &proc_no[i], &node_no[i])) break; if (!do_numa || node_no[i] == NUMA_NODE_UNDEFINED) node_no[i] = 0; max_node_no = max(max_node_no, node_no[i]); } // Pass 2: assign heap numbers by numa node int cur_heap_no = 0; for (uint16_t cur_node_no = 0; cur_node_no <= max_node_no; cur_node_no++) { for (int i = 0; i < n_heaps; i++) { if (node_no[i] != cur_node_no) continue; // we found a heap on cur_node_no heap_no_to_proc_no[cur_heap_no] = proc_no[i]; heap_no_to_numa_node[cur_heap_no] = cur_node_no; proc_no_to_numa_node[proc_no[i]] = cur_node_no; cur_heap_no++; } } return TRUE; } static void init_cpu_mapping(int heap_number) { if (GCToOSInterface::CanGetCurrentProcessorNumber()) { uint32_t proc_no = GCToOSInterface::GetCurrentProcessorNumber(); proc_no_to_heap_no[proc_no] = (uint16_t)heap_number; } } static void mark_heap(int heap_number) { if (GCToOSInterface::CanGetCurrentProcessorNumber()) return; for (unsigned sniff_index = 0; sniff_index < n_sniff_buffers; sniff_index++) sniff_buffer[(1 + heap_number*n_sniff_buffers + sniff_index)*HS_CACHE_LINE_SIZE] &= 1; } static int select_heap(alloc_context* acontext) { #ifndef TRACE_GC UNREFERENCED_PARAMETER(acontext); // only referenced by dprintf #endif //TRACE_GC if (GCToOSInterface::CanGetCurrentProcessorNumber()) { uint32_t proc_no = GCToOSInterface::GetCurrentProcessorNumber(); return proc_no_to_heap_no[proc_no]; } unsigned sniff_index = Interlocked::Increment(&cur_sniff_index); sniff_index %= n_sniff_buffers; int best_heap = 0; int best_access_time = 1000*1000*1000; int second_best_access_time = best_access_time; uint8_t *l_sniff_buffer = sniff_buffer; unsigned l_n_sniff_buffers = n_sniff_buffers; for (int heap_number = 0; heap_number < gc_heap::n_heaps; heap_number++) { int this_access_time = access_time(l_sniff_buffer, heap_number, sniff_index, l_n_sniff_buffers); if (this_access_time < best_access_time) { second_best_access_time = best_access_time; 
best_access_time = this_access_time; best_heap = heap_number; } else if (this_access_time < second_best_access_time) { second_best_access_time = this_access_time; } } if (best_access_time*2 < second_best_access_time) { sniff_buffer[(1 + best_heap*n_sniff_buffers + sniff_index)*HS_CACHE_LINE_SIZE] &= 1; dprintf (3, ("select_heap yields crisp %d for context %p\n", best_heap, (void *)acontext)); } else { dprintf (3, ("select_heap yields vague %d for context %p\n", best_heap, (void *)acontext )); } return best_heap; } static bool can_find_heap_fast() { return GCToOSInterface::CanGetCurrentProcessorNumber(); } static uint16_t find_heap_no_from_proc_no(uint16_t proc_no) { return proc_no_to_heap_no[proc_no]; } static uint16_t find_proc_no_from_heap_no(int heap_number) { return heap_no_to_proc_no[heap_number]; } static void set_proc_no_for_heap(int heap_number, uint16_t proc_no) { heap_no_to_proc_no[heap_number] = proc_no; } static uint16_t find_numa_node_from_heap_no(int heap_number) { return heap_no_to_numa_node[heap_number]; } static uint16_t find_numa_node_from_proc_no (uint16_t proc_no) { return proc_no_to_numa_node[proc_no]; } static void set_numa_node_for_heap_and_proc(int heap_number, uint16_t proc_no, uint16_t numa_node) { heap_no_to_numa_node[heap_number] = numa_node; proc_no_to_numa_node[proc_no] = numa_node; } static void init_numa_node_to_heap_map(int nheaps) { // Called right after GCHeap::Init() for each heap // For each NUMA node used by the heaps, the // numa_node_to_heap_map[numa_node] is set to the first heap number on that node and // numa_node_to_heap_map[numa_node + 1] is set to the first heap number not on that node // Set the start of the heap number range for the first NUMA node numa_node_to_heap_map[heap_no_to_numa_node[0]] = 0; total_numa_nodes = 0; memset (heaps_on_node, 0, sizeof (heaps_on_node)); heaps_on_node[0].node_no = heap_no_to_numa_node[0]; heaps_on_node[0].heap_count = 1; for (int i=1; i < nheaps; i++) { if (heap_no_to_numa_node[i] != heap_no_to_numa_node[i-1]) { total_numa_nodes++; heaps_on_node[total_numa_nodes].node_no = heap_no_to_numa_node[i]; // Set the end of the heap number range for the previous NUMA node numa_node_to_heap_map[heap_no_to_numa_node[i-1] + 1] = // Set the start of the heap number range for the current NUMA node numa_node_to_heap_map[heap_no_to_numa_node[i]] = (uint16_t)i; } (heaps_on_node[total_numa_nodes].heap_count)++; } // Set the end of the heap range for the last NUMA node numa_node_to_heap_map[heap_no_to_numa_node[nheaps-1] + 1] = (uint16_t)nheaps; //mark the end with nheaps total_numa_nodes++; } // TODO: curently this doesn't work with GCHeapAffinitizeMask/GCHeapAffinitizeRanges // because the heaps may not be on contiguous active procs. // // This is for scenarios where GCHeapCount is specified as something like // (g_num_active_processors - 2) to allow less randomization to the Server GC threads. // In this case we want to assign the right heaps to those procs, ie if they share // the same numa node we want to assign local heaps to those procs. Otherwise we // let the heap balancing mechanism take over for now. 
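    // Illustrative example (numbers assumed): with 30 active procs, GCHeapCount=28 and two
    // NUMA nodes of 15 procs each, procs 28 and 29 end up on node 1 without a heap of their
    // own; the loop below maps each of them to the next heap in node 1's heap range so a
    // user thread running there still allocates from a node-local heap.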
static void distribute_other_procs() { if (affinity_config_specified_p) return; uint16_t proc_no = 0; uint16_t node_no = 0; bool res = false; int start_heap = -1; int end_heap = -1; int current_node_no = -1; int current_heap_on_node = -1; for (int i = gc_heap::n_heaps; i < (int)g_num_active_processors; i++) { if (!GCToOSInterface::GetProcessorForHeap (i, &proc_no, &node_no)) break; int start_heap = (int)numa_node_to_heap_map[node_no]; int end_heap = (int)(numa_node_to_heap_map[node_no + 1]); if ((end_heap - start_heap) > 0) { if (node_no == current_node_no) { // We already iterated through all heaps on this node, don't add more procs to these // heaps. if (current_heap_on_node >= end_heap) { continue; } } else { current_node_no = node_no; current_heap_on_node = start_heap; } proc_no_to_heap_no[proc_no] = current_heap_on_node; proc_no_to_numa_node[proc_no] = node_no; current_heap_on_node++; } } } static void get_heap_range_for_heap(int hn, int* start, int* end) { uint16_t numa_node = heap_no_to_numa_node[hn]; *start = (int)numa_node_to_heap_map[numa_node]; *end = (int)(numa_node_to_heap_map[numa_node+1]); #ifdef HEAP_BALANCE_INSTRUMENTATION dprintf(HEAP_BALANCE_TEMP_LOG, ("TEMPget_heap_range: %d is in numa node %d, start = %d, end = %d", hn, numa_node, *start, *end)); #endif //HEAP_BALANCE_INSTRUMENTATION } // This gets the next valid numa node index starting at current_index+1. // It assumes that current_index is a valid node index. // If current_index+1 is at the end this will start at the beginning. So this will // always return a valid node index, along with that node's start/end heaps. static uint16_t get_next_numa_node (uint16_t current_index, int* start, int* end) { int start_index = current_index + 1; int nheaps = gc_heap::n_heaps; bool found_node_with_heaps_p = false; do { int start_heap = (int)numa_node_to_heap_map[start_index]; int end_heap = (int)numa_node_to_heap_map[start_index + 1]; if (start_heap == nheaps) { // This is the last node. start_index = 0; continue; } if ((end_heap - start_heap) == 0) { // This node has no heaps. start_index++; } else { found_node_with_heaps_p = true; *start = start_heap; *end = end_heap; } } while (!found_node_with_heaps_p); return start_index; } }; uint8_t* heap_select::sniff_buffer; unsigned heap_select::n_sniff_buffers; unsigned heap_select::cur_sniff_index; uint16_t heap_select::proc_no_to_heap_no[MAX_SUPPORTED_CPUS]; uint16_t heap_select::heap_no_to_proc_no[MAX_SUPPORTED_CPUS]; uint16_t heap_select::heap_no_to_numa_node[MAX_SUPPORTED_CPUS]; uint16_t heap_select::proc_no_to_numa_node[MAX_SUPPORTED_CPUS]; uint16_t heap_select::numa_node_to_heap_map[MAX_SUPPORTED_CPUS+4]; uint16_t heap_select::total_numa_nodes; node_heap_count heap_select::heaps_on_node[MAX_SUPPORTED_NODES]; #ifdef HEAP_BALANCE_INSTRUMENTATION // This records info we use to look at effect of different strategies // for heap balancing. struct heap_balance_info { uint64_t timestamp; // This also encodes when we detect the thread runs on // different proc during a balance attempt. Sometimes // I observe this happens multiple times during one attempt! // If this happens, I just record the last proc we observe // and set MSB. int tid; // This records the final alloc_heap for the thread. // // This also encodes the reason why we needed to set_home_heap // in balance_heaps. // If we set it because the home heap is not the same as the proc, // we set MSB. // // If we set ideal proc, we set the 2nd MSB. 
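    // Worked example of the encoding (assuming a 32-bit int): a tid of 0x1234 observed on
    // more than one proc is stored by add_to_hb_numa as 0x80001234; an alloc_heap of 5
    // recorded because the home heap differed from the proc's heap is stored as 0x80000005,
    // and the second-highest bit (0x40000000) is OR'ed in when the ideal processor was set.
    // hb_log_balance_activities strips these bits before printing and turns them into the
    // |m, |p and |i suffixes.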
int alloc_heap; int ideal_proc_no; }; // This means inbetween each GC we can log at most this many entries per proc. // This is usually enough. Most of the time we only need to log something every 128k // of allocations in balance_heaps and gen0 budget is <= 200mb. #define default_max_hb_heap_balance_info 4096 struct heap_balance_info_proc { int count; int index; heap_balance_info hb_info[default_max_hb_heap_balance_info]; }; struct heap_balance_info_numa { heap_balance_info_proc* hb_info_procs; }; uint64_t start_raw_ts = 0; bool cpu_group_enabled_p = false; uint32_t procs_per_numa_node = 0; uint16_t total_numa_nodes_on_machine = 0; uint32_t procs_per_cpu_group = 0; uint16_t total_cpu_groups_on_machine = 0; // Note this is still on one of the numa nodes, so we'll incur a remote access // no matter what. heap_balance_info_numa* hb_info_numa_nodes = NULL; // TODO: This doesn't work for multiple nodes per CPU group yet. int get_proc_index_numa (int proc_no, int* numa_no) { if (total_numa_nodes_on_machine == 1) { *numa_no = 0; return proc_no; } else { if (cpu_group_enabled_p) { // see vm\gcenv.os.cpp GroupProcNo implementation. *numa_no = proc_no >> 6; return (proc_no % 64); } else { *numa_no = proc_no / procs_per_numa_node; return (proc_no % procs_per_numa_node); } } } // We could consider optimizing it so we don't need to get the tid // everytime but it's not very expensive to get. void add_to_hb_numa ( int proc_no, int ideal_proc_no, int alloc_heap, bool multiple_procs_p, bool alloc_count_p, bool set_ideal_p) { int tid = (int)GCToOSInterface::GetCurrentThreadIdForLogging (); uint64_t timestamp = RawGetHighPrecisionTimeStamp (); int saved_proc_no = proc_no; int numa_no = -1; proc_no = get_proc_index_numa (proc_no, &numa_no); heap_balance_info_numa* hb_info_numa_node = &hb_info_numa_nodes[numa_no]; heap_balance_info_proc* hb_info_proc = &(hb_info_numa_node->hb_info_procs[proc_no]); int index = hb_info_proc->index; int count = hb_info_proc->count; if (index == count) { // Too much info inbetween GCs. This can happen if the thread is scheduled on a different // processor very often so it caused us to log many entries due to that reason. You could // increase default_max_hb_heap_balance_info but this usually indicates a problem that // should be investigated. 
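    // The DebugBreak below is deliberate: this path only exists under
    // HEAP_BALANCE_INSTRUMENTATION, and overflowing the per-proc hb_info buffer is treated
    // as something to stop and investigate (see the comment above) rather than a condition
    // to paper over by silently dropping entries.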
dprintf (HEAP_BALANCE_LOG, ("too much info between GCs, already logged %d entries", index)); GCToOSInterface::DebugBreak (); } heap_balance_info* hb_info = &(hb_info_proc->hb_info[index]); dprintf (HEAP_BALANCE_TEMP_LOG, ("TEMP[p%3d->%3d(i:%3d), N%d] #%4d: %I64d, tid %d, ah: %d, m: %d, p: %d, i: %d", saved_proc_no, proc_no, ideal_proc_no, numa_no, index, (timestamp - start_raw_ts) / 1000, tid, alloc_heap, (int)multiple_procs_p, (int)(!alloc_count_p), (int)set_ideal_p)); if (multiple_procs_p) { tid |= (1 << (sizeof (tid) * 8 - 1)); } if (!alloc_count_p) { alloc_heap |= (1 << (sizeof (alloc_heap) * 8 - 1)); } if (set_ideal_p) { alloc_heap |= (1 << (sizeof (alloc_heap) * 8 - 2)); } hb_info->timestamp = timestamp; hb_info->tid = tid; hb_info->alloc_heap = alloc_heap; hb_info->ideal_proc_no = ideal_proc_no; (hb_info_proc->index)++; } const int hb_log_buffer_size = 4096; static char hb_log_buffer[hb_log_buffer_size]; int last_hb_recorded_gc_index = -1; #endif //HEAP_BALANCE_INSTRUMENTATION // This logs what we recorded in balance_heaps // The format for this is // // [ms since last GC end] // [cpu index] // all elements we stored before this GC for this CPU in the format // timestamp,tid, alloc_heap_no // repeat this for each CPU // // the timestamp here is just the result of calling QPC, // it's not converted to ms. The conversion will be done when we process // the log. void gc_heap::hb_log_balance_activities() { #ifdef HEAP_BALANCE_INSTRUMENTATION char* log_buffer = hb_log_buffer; uint64_t now = GetHighPrecisionTimeStamp(); size_t time_since_last_gc_ms = (size_t)((now - last_gc_end_time_us) / 1000); dprintf (HEAP_BALANCE_TEMP_LOG, ("TEMP%Id - %Id = %Id", now, last_gc_end_time_ms, time_since_last_gc_ms)); // We want to get the min and the max timestamp for all procs because it helps with our post processing // to know how big an array to allocate to display the history inbetween the GCs. uint64_t min_timestamp = 0xffffffffffffffff; uint64_t max_timestamp = 0; for (int numa_node_index = 0; numa_node_index < total_numa_nodes_on_machine; numa_node_index++) { heap_balance_info_proc* hb_info_procs = hb_info_numa_nodes[numa_node_index].hb_info_procs; for (int proc_index = 0; proc_index < (int)procs_per_numa_node; proc_index++) { heap_balance_info_proc* hb_info_proc = &hb_info_procs[proc_index]; int total_entries_on_proc = hb_info_proc->index; if (total_entries_on_proc > 0) { min_timestamp = min (min_timestamp, hb_info_proc->hb_info[0].timestamp); max_timestamp = max (max_timestamp, hb_info_proc->hb_info[total_entries_on_proc - 1].timestamp); } } } dprintf (HEAP_BALANCE_LOG, ("[GCA#%Id %Id-%I64d-%I64d]", settings.gc_index, time_since_last_gc_ms, (min_timestamp - start_raw_ts), (max_timestamp - start_raw_ts))); if (last_hb_recorded_gc_index == (int)settings.gc_index) { GCToOSInterface::DebugBreak (); } last_hb_recorded_gc_index = (int)settings.gc_index; // When we print out the proc index we need to convert it to the actual proc index (this is contiguous). // It helps with post processing. 
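    // Each entry printed below has the form "<timestamp delta>,<tid>,<ideal proc>,<alloc heap>"
    // with optional suffixes |m (thread seen on multiple procs), |p (recorded because the home
    // heap was not the proc's heap rather than by alloc count) and |i (ideal processor was set),
    // mirroring the bit decoding right above the dprintf.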
for (int numa_node_index = 0; numa_node_index < total_numa_nodes_on_machine; numa_node_index++) { heap_balance_info_proc* hb_info_procs = hb_info_numa_nodes[numa_node_index].hb_info_procs; for (int proc_index = 0; proc_index < (int)procs_per_numa_node; proc_index++) { heap_balance_info_proc* hb_info_proc = &hb_info_procs[proc_index]; int total_entries_on_proc = hb_info_proc->index; if (total_entries_on_proc > 0) { int total_exec_time_ms = (int)((double)(hb_info_proc->hb_info[total_entries_on_proc - 1].timestamp - hb_info_proc->hb_info[0].timestamp) * qpf_ms); dprintf (HEAP_BALANCE_LOG, ("[p%d]-%d-%dms", (proc_index + numa_node_index * procs_per_numa_node), total_entries_on_proc, total_exec_time_ms)); } for (int i = 0; i < hb_info_proc->index; i++) { heap_balance_info* hb_info = &hb_info_proc->hb_info[i]; bool multiple_procs_p = false; bool alloc_count_p = true; bool set_ideal_p = false; int tid = hb_info->tid; int alloc_heap = hb_info->alloc_heap; if (tid & (1 << (sizeof (tid) * 8 - 1))) { multiple_procs_p = true; tid &= ~(1 << (sizeof (tid) * 8 - 1)); } if (alloc_heap & (1 << (sizeof (alloc_heap) * 8 - 1))) { alloc_count_p = false; alloc_heap &= ~(1 << (sizeof (alloc_heap) * 8 - 1)); } if (alloc_heap & (1 << (sizeof (alloc_heap) * 8 - 2))) { set_ideal_p = true; alloc_heap &= ~(1 << (sizeof (alloc_heap) * 8 - 2)); } // TODO - This assumes ideal proc is in the same cpu group which is not true // when we don't have CPU groups. int ideal_proc_no = hb_info->ideal_proc_no; int ideal_node_no = -1; ideal_proc_no = get_proc_index_numa (ideal_proc_no, &ideal_node_no); ideal_proc_no = ideal_proc_no + ideal_node_no * procs_per_numa_node; dprintf (HEAP_BALANCE_LOG, ("%I64d,%d,%d,%d%s%s%s", (hb_info->timestamp - start_raw_ts), tid, ideal_proc_no, (int)alloc_heap, (multiple_procs_p ? "|m" : ""), (!alloc_count_p ? "|p" : ""), (set_ideal_p ? "|i" : ""))); } } } for (int numa_node_index = 0; numa_node_index < total_numa_nodes_on_machine; numa_node_index++) { heap_balance_info_proc* hb_info_procs = hb_info_numa_nodes[numa_node_index].hb_info_procs; for (int proc_index = 0; proc_index < (int)procs_per_numa_node; proc_index++) { heap_balance_info_proc* hb_info_proc = &hb_info_procs[proc_index]; hb_info_proc->index = 0; } } #endif //HEAP_BALANCE_INSTRUMENTATION } // The format for this is // // [GC_alloc_mb] // h0_new_alloc, h1_new_alloc, ... // void gc_heap::hb_log_new_allocation() { #ifdef HEAP_BALANCE_INSTRUMENTATION char* log_buffer = hb_log_buffer; int desired_alloc_mb = (int)(dd_desired_allocation (g_heaps[0]->dynamic_data_of (0)) / 1024 / 1024); int buffer_pos = sprintf_s (hb_log_buffer, hb_log_buffer_size, "[GC_alloc_mb]\n"); for (int numa_node_index = 0; numa_node_index < heap_select::total_numa_nodes; numa_node_index++) { int node_allocated_mb = 0; // I'm printing out the budget here instead of the numa node index so we know how much // of the budget we consumed. 
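    // As an example of the shape of this output (numbers assumed), a line like "[N#200]12,7,30,"
    // would mean the per-heap gen0 budget is about 200mb and the three heaps on this node have
    // consumed 12, 7 and 30mb of it since the last GC.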
buffer_pos += sprintf_s (hb_log_buffer + buffer_pos, hb_log_buffer_size - buffer_pos, "[N#%3d]", //numa_node_index); desired_alloc_mb); int heaps_on_node = heap_select::heaps_on_node[numa_node_index].heap_count; for (int heap_index = 0; heap_index < heaps_on_node; heap_index++) { int actual_heap_index = heap_index + numa_node_index * heaps_on_node; gc_heap* hp = g_heaps[actual_heap_index]; dynamic_data* dd0 = hp->dynamic_data_of (0); int allocated_mb = (int)((dd_desired_allocation (dd0) - dd_new_allocation (dd0)) / 1024 / 1024); node_allocated_mb += allocated_mb; buffer_pos += sprintf_s (hb_log_buffer + buffer_pos, hb_log_buffer_size - buffer_pos, "%d,", allocated_mb); } dprintf (HEAP_BALANCE_TEMP_LOG, ("TEMPN#%d a %dmb(%dmb)", numa_node_index, node_allocated_mb, desired_alloc_mb)); buffer_pos += sprintf_s (hb_log_buffer + buffer_pos, hb_log_buffer_size - buffer_pos, "\n"); } dprintf (HEAP_BALANCE_LOG, ("%s", hb_log_buffer)); #endif //HEAP_BALANCE_INSTRUMENTATION } BOOL gc_heap::create_thread_support (int number_of_heaps) { BOOL ret = FALSE; if (!gc_start_event.CreateOSManualEventNoThrow (FALSE)) { goto cleanup; } if (!ee_suspend_event.CreateOSAutoEventNoThrow (FALSE)) { goto cleanup; } if (!gc_t_join.init (number_of_heaps, join_flavor_server_gc)) { goto cleanup; } ret = TRUE; cleanup: if (!ret) { destroy_thread_support(); } return ret; } void gc_heap::destroy_thread_support () { if (ee_suspend_event.IsValid()) { ee_suspend_event.CloseEvent(); } if (gc_start_event.IsValid()) { gc_start_event.CloseEvent(); } } void set_thread_affinity_for_heap (int heap_number, uint16_t proc_no) { if (!GCToOSInterface::SetThreadAffinity (proc_no)) { dprintf (1, ("Failed to set thread affinity for GC thread %d on proc #%d", heap_number, proc_no)); } } bool gc_heap::create_gc_thread () { dprintf (3, ("Creating gc thread\n")); return GCToEEInterface::CreateThread(gc_thread_stub, this, false, ".NET Server GC"); } #ifdef _MSC_VER #pragma warning(disable:4715) //IA64 xcompiler recognizes that without the 'break;' the while(1) will never end and therefore not return a value for that code path #endif //_MSC_VER void gc_heap::gc_thread_function () { assert (gc_done_event.IsValid()); assert (gc_start_event.IsValid()); dprintf (3, ("gc thread started")); heap_select::init_cpu_mapping(heap_number); while (1) { assert (!gc_t_join.joined()); if (heap_number == 0) { uint32_t wait_result = gc_heap::ee_suspend_event.Wait(gradual_decommit_in_progress_p ? 
DECOMMIT_TIME_STEP_MILLISECONDS : INFINITE, FALSE); if (wait_result == WAIT_TIMEOUT) { gradual_decommit_in_progress_p = decommit_step (); continue; } suspended_start_time = GetHighPrecisionTimeStamp(); BEGIN_TIMING(suspend_ee_during_log); GCToEEInterface::SuspendEE(SUSPEND_FOR_GC); END_TIMING(suspend_ee_during_log); proceed_with_gc_p = TRUE; gradual_decommit_in_progress_p = FALSE; if (!should_proceed_with_gc()) { update_collection_counts_for_no_gc(); proceed_with_gc_p = FALSE; } else { settings.init_mechanisms(); gc_start_event.Set(); } dprintf (3, (ThreadStressLog::gcServerThread0StartMsg(), heap_number)); } else { gc_start_event.Wait(INFINITE, FALSE); dprintf (3, (ThreadStressLog::gcServerThreadNStartMsg(), heap_number)); } assert ((heap_number == 0) || proceed_with_gc_p); if (proceed_with_gc_p) { garbage_collect (GCHeap::GcCondemnedGeneration); if (pm_trigger_full_gc) { garbage_collect_pm_full_gc(); } } if (heap_number == 0) { if (proceed_with_gc_p && (!settings.concurrent)) { do_post_gc(); } #ifdef BACKGROUND_GC recover_bgc_settings(); #endif //BACKGROUND_GC #ifdef MULTIPLE_HEAPS for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; hp->add_saved_spinlock_info (false, me_release, mt_block_gc); leave_spin_lock(&hp->more_space_lock_soh); } #endif //MULTIPLE_HEAPS gc_heap::gc_started = FALSE; #ifdef BACKGROUND_GC gc_heap::add_bgc_pause_duration_0(); #endif //BACKGROUND_GC BEGIN_TIMING(restart_ee_during_log); GCToEEInterface::RestartEE(TRUE); END_TIMING(restart_ee_during_log); process_sync_log_stats(); dprintf (SPINLOCK_LOG, ("GC Lgc")); leave_spin_lock (&gc_heap::gc_lock); gc_heap::internal_gc_done = true; if (proceed_with_gc_p) set_gc_done(); else { // If we didn't actually do a GC, it means we didn't wait up the other threads, // we still need to set the gc_done_event for those threads. for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; hp->set_gc_done(); } } // check if we should do some decommitting if (gradual_decommit_in_progress_p) { gradual_decommit_in_progress_p = decommit_step (); } } else { int spin_count = 32 * (gc_heap::n_heaps - 1); // wait until RestartEE has progressed to a stage where we can restart user threads while (!gc_heap::internal_gc_done && !GCHeap::SafeToRestartManagedThreads()) { spin_and_switch (spin_count, (gc_heap::internal_gc_done || GCHeap::SafeToRestartManagedThreads())); } set_gc_done(); } } } #ifdef _MSC_VER #pragma warning(default:4715) //IA64 xcompiler recognizes that without the 'break;' the while(1) will never end and therefore not return a value for that code path #endif //_MSC_VER #endif //MULTIPLE_HEAPS bool gc_heap::virtual_alloc_commit_for_heap (void* addr, size_t size, int h_number) { #if defined(MULTIPLE_HEAPS) && !defined(FEATURE_REDHAWK) // Currently there is no way for us to specific the numa node to allocate on via hosting interfaces to // a host. This will need to be added later. 
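// Preference order below: when NUMA awareness is enabled, try to commit the memory on the
// heap's NUMA node first; if that fails, or NUMA is unavailable, the function falls through
// to the plain GCToOSInterface::VirtualCommit at the bottom.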
#if !defined(FEATURE_CORECLR) && !defined(BUILD_AS_STANDALONE) if (!CLRMemoryHosted()) #endif { if (GCToOSInterface::CanEnableGCNumaAware()) { uint16_t numa_node = heap_select::find_numa_node_from_heap_no(h_number); if (GCToOSInterface::VirtualCommit (addr, size, numa_node)) return true; } } #else //MULTIPLE_HEAPS && !FEATURE_REDHAWK UNREFERENCED_PARAMETER(h_number); #endif //MULTIPLE_HEAPS && !FEATURE_REDHAWK //numa aware not enabled, or call failed --> fallback to VirtualCommit() return GCToOSInterface::VirtualCommit(addr, size); } bool gc_heap::virtual_commit (void* address, size_t size, gc_oh_num oh, int h_number, bool* hard_limit_exceeded_p) { #ifndef HOST_64BIT assert (heap_hard_limit == 0); #endif //!HOST_64BIT if (heap_hard_limit) { check_commit_cs.Enter(); bool exceeded_p = false; if (heap_hard_limit_oh[soh] != 0) { if ((oh != gc_oh_num::none) && (committed_by_oh[oh] + size) > heap_hard_limit_oh[oh]) { exceeded_p = true; } } else if ((current_total_committed + size) > heap_hard_limit) { dprintf (1, ("%Id + %Id = %Id > limit %Id ", current_total_committed, size, (current_total_committed + size), heap_hard_limit)); exceeded_p = true; } if (!exceeded_p) { committed_by_oh[oh] += size; current_total_committed += size; if (h_number < 0) current_total_committed_bookkeeping += size; } check_commit_cs.Leave(); if (hard_limit_exceeded_p) *hard_limit_exceeded_p = exceeded_p; if (exceeded_p) { dprintf (1, ("can't commit %Ix for %Id bytes > HARD LIMIT %Id", (size_t)address, size, heap_hard_limit)); return false; } } // If it's a valid heap number it means it's commiting for memory on the GC heap. // In addition if large pages is enabled, we set commit_succeeded_p to true because memory is already committed. bool commit_succeeded_p = ((h_number >= 0) ? (use_large_pages_p ? true : virtual_alloc_commit_for_heap (address, size, h_number)) : GCToOSInterface::VirtualCommit(address, size)); if (!commit_succeeded_p && heap_hard_limit) { check_commit_cs.Enter(); committed_by_oh[oh] -= size; dprintf (1, ("commit failed, updating %Id to %Id", current_total_committed, (current_total_committed - size))); current_total_committed -= size; if (h_number < 0) current_total_committed_bookkeeping -= size; check_commit_cs.Leave(); } return commit_succeeded_p; } bool gc_heap::virtual_decommit (void* address, size_t size, gc_oh_num oh, int h_number) { #ifndef HOST_64BIT assert (heap_hard_limit == 0); #endif //!HOST_64BIT bool decommit_succeeded_p = GCToOSInterface::VirtualDecommit (address, size); if (decommit_succeeded_p && heap_hard_limit) { check_commit_cs.Enter(); committed_by_oh[oh] -= size; current_total_committed -= size; if (h_number < 0) current_total_committed_bookkeeping -= size; check_commit_cs.Leave(); } return decommit_succeeded_p; } void gc_heap::virtual_free (void* add, size_t allocated_size, heap_segment* sg) { bool release_succeeded_p = GCToOSInterface::VirtualRelease (add, allocated_size); if (release_succeeded_p) { reserved_memory -= allocated_size; dprintf (2, ("Virtual Free size %Id: [%Ix, %Ix[", allocated_size, (size_t)add, (size_t)((uint8_t*)add + allocated_size))); } } class mark { public: uint8_t* first; size_t len; // If we want to save space we can have a pool of plug_and_gap's instead of // always having 2 allocated for each pinned plug. gap_reloc_pair saved_pre_plug; // If we decide to not compact, we need to restore the original values. 
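    // Note there are two saved copies of each shortened gap: saved_pre_plug/saved_post_plug
    // keep the original bytes (used by the sweep path and by the *_for_profiler swap helpers),
    // while the *_reloc copies below are the ones used when we compact; recover_plug_info()
    // chooses between them based on settings.compaction.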
gap_reloc_pair saved_pre_plug_reloc; gap_reloc_pair saved_post_plug; // Supposedly Pinned objects cannot have references but we are seeing some from pinvoke // frames. Also if it's an artificially pinned plug created by us, it can certainly // have references. // We know these cases will be rare so we can optimize this to be only allocated on demand. gap_reloc_pair saved_post_plug_reloc; // We need to calculate this after we are done with plan phase and before compact // phase because compact phase will change the bricks so relocate_address will no // longer work. uint8_t* saved_pre_plug_info_reloc_start; // We need to save this because we will have no way to calculate it, unlike the // pre plug info start which is right before this plug. uint8_t* saved_post_plug_info_start; #ifdef SHORT_PLUGS uint8_t* allocation_context_start_region; #endif //SHORT_PLUGS // How the bits in these bytes are organized: // MSB --> LSB // bit to indicate whether it's a short obj | 3 bits for refs in this short obj | 2 unused bits | bit to indicate if it's collectible | last bit // last bit indicates if there's pre or post info associated with this plug. If it's not set all other bits will be 0. BOOL saved_pre_p; BOOL saved_post_p; #ifdef _DEBUG // We are seeing this is getting corrupted for a PP with a NP after. // Save it when we first set it and make sure it doesn't change. gap_reloc_pair saved_post_plug_debug; #endif //_DEBUG size_t get_max_short_bits() { return (sizeof (gap_reloc_pair) / sizeof (uint8_t*)); } // pre bits size_t get_pre_short_start_bit () { return (sizeof (saved_pre_p) * 8 - 1 - (sizeof (gap_reloc_pair) / sizeof (uint8_t*))); } BOOL pre_short_p() { return (saved_pre_p & (1 << (sizeof (saved_pre_p) * 8 - 1))); } void set_pre_short() { saved_pre_p |= (1 << (sizeof (saved_pre_p) * 8 - 1)); } void set_pre_short_bit (size_t bit) { saved_pre_p |= 1 << (get_pre_short_start_bit() + bit); } BOOL pre_short_bit_p (size_t bit) { return (saved_pre_p & (1 << (get_pre_short_start_bit() + bit))); } #ifdef COLLECTIBLE_CLASS void set_pre_short_collectible() { saved_pre_p |= 2; } BOOL pre_short_collectible_p() { return (saved_pre_p & 2); } #endif //COLLECTIBLE_CLASS // post bits size_t get_post_short_start_bit () { return (sizeof (saved_post_p) * 8 - 1 - (sizeof (gap_reloc_pair) / sizeof (uint8_t*))); } BOOL post_short_p() { return (saved_post_p & (1 << (sizeof (saved_post_p) * 8 - 1))); } void set_post_short() { saved_post_p |= (1 << (sizeof (saved_post_p) * 8 - 1)); } void set_post_short_bit (size_t bit) { saved_post_p |= 1 << (get_post_short_start_bit() + bit); } BOOL post_short_bit_p (size_t bit) { return (saved_post_p & (1 << (get_post_short_start_bit() + bit))); } #ifdef COLLECTIBLE_CLASS void set_post_short_collectible() { saved_post_p |= 2; } BOOL post_short_collectible_p() { return (saved_post_p & 2); } #endif //COLLECTIBLE_CLASS uint8_t* get_plug_address() { return first; } BOOL has_pre_plug_info() { return saved_pre_p; } BOOL has_post_plug_info() { return saved_post_p; } gap_reloc_pair* get_pre_plug_reloc_info() { return &saved_pre_plug_reloc; } gap_reloc_pair* get_post_plug_reloc_info() { return &saved_post_plug_reloc; } void set_pre_plug_info_reloc_start (uint8_t* reloc) { saved_pre_plug_info_reloc_start = reloc; } uint8_t* get_post_plug_info_start() { return saved_post_plug_info_start; } // We need to temporarily recover the shortened plugs for compact phase so we can // copy over the whole plug and their related info (mark bits/cards). 
But we will // need to set the artificial gap back so compact phase can keep reading the plug info. // We also need to recover the saved info because we'll need to recover it later. // // So we would call swap_p*_plug_and_saved once to recover the object info; then call // it again to recover the artificial gap. void swap_pre_plug_and_saved() { gap_reloc_pair temp; memcpy (&temp, (first - sizeof (plug_and_gap)), sizeof (temp)); memcpy ((first - sizeof (plug_and_gap)), &saved_pre_plug_reloc, sizeof (saved_pre_plug_reloc)); saved_pre_plug_reloc = temp; } void swap_post_plug_and_saved() { gap_reloc_pair temp; memcpy (&temp, saved_post_plug_info_start, sizeof (temp)); memcpy (saved_post_plug_info_start, &saved_post_plug_reloc, sizeof (saved_post_plug_reloc)); saved_post_plug_reloc = temp; } void swap_pre_plug_and_saved_for_profiler() { gap_reloc_pair temp; memcpy (&temp, (first - sizeof (plug_and_gap)), sizeof (temp)); memcpy ((first - sizeof (plug_and_gap)), &saved_pre_plug, sizeof (saved_pre_plug)); saved_pre_plug = temp; } void swap_post_plug_and_saved_for_profiler() { gap_reloc_pair temp; memcpy (&temp, saved_post_plug_info_start, sizeof (temp)); memcpy (saved_post_plug_info_start, &saved_post_plug, sizeof (saved_post_plug)); saved_post_plug = temp; } // We should think about whether it's really necessary to have to copy back the pre plug // info since it was already copied during compacting plugs. But if a plug doesn't move // by >= 3 ptr size (the size of gap_reloc_pair), it means we'd have to recover pre plug info. size_t recover_plug_info() { // We need to calculate the size for sweep case in order to correctly record the // free_obj_space - sweep would've made these artifical gaps into free objects and // we would need to deduct the size because now we are writing into those free objects. 
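        // Each artificial gap being restored is sizeof (gap_reloc_pair) bytes (three pointers),
        // and recovered_sweep_size only grows in the non-compacting branches, so the return
        // value is zero, one or two gap sizes depending on which saved infos this pin carries.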
size_t recovered_sweep_size = 0; if (saved_pre_p) { if (gc_heap::settings.compaction) { dprintf (3, ("%Ix: REC Pre: %Ix-%Ix", first, &saved_pre_plug_reloc, saved_pre_plug_info_reloc_start)); memcpy (saved_pre_plug_info_reloc_start, &saved_pre_plug_reloc, sizeof (saved_pre_plug_reloc)); } else { dprintf (3, ("%Ix: REC Pre: %Ix-%Ix", first, &saved_pre_plug, (first - sizeof (plug_and_gap)))); memcpy ((first - sizeof (plug_and_gap)), &saved_pre_plug, sizeof (saved_pre_plug)); recovered_sweep_size += sizeof (saved_pre_plug); } } if (saved_post_p) { if (gc_heap::settings.compaction) { dprintf (3, ("%Ix: REC Post: %Ix-%Ix", first, &saved_post_plug_reloc, saved_post_plug_info_start)); memcpy (saved_post_plug_info_start, &saved_post_plug_reloc, sizeof (saved_post_plug_reloc)); } else { dprintf (3, ("%Ix: REC Post: %Ix-%Ix", first, &saved_post_plug, saved_post_plug_info_start)); memcpy (saved_post_plug_info_start, &saved_post_plug, sizeof (saved_post_plug)); recovered_sweep_size += sizeof (saved_post_plug); } } return recovered_sweep_size; } }; void gc_mechanisms::init_mechanisms() { condemned_generation = 0; promotion = FALSE;//TRUE; compaction = TRUE; #ifdef FEATURE_LOH_COMPACTION loh_compaction = gc_heap::loh_compaction_requested(); #else loh_compaction = FALSE; #endif //FEATURE_LOH_COMPACTION heap_expansion = FALSE; concurrent = FALSE; demotion = FALSE; elevation_reduced = FALSE; found_finalizers = FALSE; #ifdef BACKGROUND_GC background_p = gc_heap::background_running_p() != FALSE; #endif //BACKGROUND_GC entry_memory_load = 0; entry_available_physical_mem = 0; exit_memory_load = 0; #ifdef STRESS_HEAP stress_induced = FALSE; #endif // STRESS_HEAP } void gc_mechanisms::first_init() { gc_index = 0; gen0_reduction_count = 0; should_lock_elevation = FALSE; elevation_locked_count = 0; reason = reason_empty; #ifdef BACKGROUND_GC pause_mode = gc_heap::gc_can_use_concurrent ? pause_interactive : pause_batch; #ifdef _DEBUG int debug_pause_mode = static_cast<int>(GCConfig::GetLatencyMode()); if (debug_pause_mode >= 0) { assert (debug_pause_mode <= pause_sustained_low_latency); pause_mode = (gc_pause_mode)debug_pause_mode; } #endif //_DEBUG #else //BACKGROUND_GC pause_mode = pause_batch; #endif //BACKGROUND_GC init_mechanisms(); } void gc_mechanisms::record (gc_history_global* history) { #ifdef MULTIPLE_HEAPS history->num_heaps = gc_heap::n_heaps; #else history->num_heaps = 1; #endif //MULTIPLE_HEAPS history->condemned_generation = condemned_generation; history->gen0_reduction_count = gen0_reduction_count; history->reason = reason; history->pause_mode = (int)pause_mode; history->mem_pressure = entry_memory_load; history->global_mechanisms_p = 0; // start setting the boolean values. if (concurrent) history->set_mechanism_p (global_concurrent); if (compaction) history->set_mechanism_p (global_compaction); if (promotion) history->set_mechanism_p (global_promotion); if (demotion) history->set_mechanism_p (global_demotion); if (card_bundles) history->set_mechanism_p (global_card_bundles); if (elevation_reduced) history->set_mechanism_p (global_elevation); } /********************************** called at the beginning of GC to fix the allocated size to what is really allocated, or to turn the free area into an unused object It needs to be called after all of the other allocation contexts have been fixed since it relies on alloc_allocated. 
********************************/ //for_gc_p indicates that the work is being done for GC, //as opposed to concurrent heap verification void gc_heap::fix_youngest_allocation_area() { // The gen 0 alloc context is never used for allocation in the allocator path. It's // still used in the allocation path during GCs. assert (generation_allocation_pointer (youngest_generation) == nullptr); assert (generation_allocation_limit (youngest_generation) == nullptr); heap_segment_allocated (ephemeral_heap_segment) = alloc_allocated; assert (heap_segment_mem (ephemeral_heap_segment) <= heap_segment_allocated (ephemeral_heap_segment)); assert (heap_segment_allocated (ephemeral_heap_segment) <= heap_segment_reserved (ephemeral_heap_segment)); } //for_gc_p indicates that the work is being done for GC, //as opposed to concurrent heap verification void gc_heap::fix_allocation_context (alloc_context* acontext, BOOL for_gc_p, BOOL record_ac_p) { dprintf (3, ("Fixing allocation context %Ix: ptr: %Ix, limit: %Ix", (size_t)acontext, (size_t)acontext->alloc_ptr, (size_t)acontext->alloc_limit)); if (acontext->alloc_ptr == 0) { return; } int align_const = get_alignment_constant (TRUE); #ifdef USE_REGIONS bool is_ephemeral_heap_segment = in_range_for_segment (acontext->alloc_limit, ephemeral_heap_segment); #else // USE_REGIONS bool is_ephemeral_heap_segment = true; #endif // USE_REGIONS if ((!is_ephemeral_heap_segment) || ((size_t)(alloc_allocated - acontext->alloc_limit) > Align (min_obj_size, align_const)) || !for_gc_p) { uint8_t* point = acontext->alloc_ptr; size_t size = (acontext->alloc_limit - acontext->alloc_ptr); // the allocation area was from the free list // it was shortened by Align (min_obj_size) to make room for // at least the shortest unused object size += Align (min_obj_size, align_const); assert ((size >= Align (min_obj_size))); dprintf(3,("Making unused area [%Ix, %Ix[", (size_t)point, (size_t)point + size )); make_unused_array (point, size); if (for_gc_p) { generation_free_obj_space (generation_of (0)) += size; if (record_ac_p) alloc_contexts_used ++; } } else if (for_gc_p) { assert (is_ephemeral_heap_segment); alloc_allocated = acontext->alloc_ptr; assert (heap_segment_allocated (ephemeral_heap_segment) <= heap_segment_committed (ephemeral_heap_segment)); if (record_ac_p) alloc_contexts_used ++; } if (for_gc_p) { // We need to update the alloc_bytes to reflect the portion that we have not used acontext->alloc_bytes -= (acontext->alloc_limit - acontext->alloc_ptr); total_alloc_bytes_soh -= (acontext->alloc_limit - acontext->alloc_ptr); acontext->alloc_ptr = 0; acontext->alloc_limit = acontext->alloc_ptr; } } //used by the heap verification for concurrent gc. //it nulls out the words set by fix_allocation_context for heap_verification void repair_allocation (gc_alloc_context* acontext, void*) { uint8_t* point = acontext->alloc_ptr; if (point != 0) { dprintf (3, ("Clearing [%Ix, %Ix[", (size_t)acontext->alloc_ptr, (size_t)acontext->alloc_limit+Align(min_obj_size))); memclr (acontext->alloc_ptr - plug_skew, (acontext->alloc_limit - acontext->alloc_ptr)+Align (min_obj_size)); } } void void_allocation (gc_alloc_context* acontext, void*) { uint8_t* point = acontext->alloc_ptr; if (point != 0) { dprintf (3, ("Void [%Ix, %Ix[", (size_t)acontext->alloc_ptr, (size_t)acontext->alloc_limit+Align(min_obj_size))); acontext->alloc_ptr = 0; acontext->alloc_limit = acontext->alloc_ptr; } } void gc_heap::repair_allocation_contexts (BOOL repair_p) { GCToEEInterface::GcEnumAllocContexts (repair_p ? 
repair_allocation : void_allocation, NULL); } struct fix_alloc_context_args { BOOL for_gc_p; void* heap; }; void fix_alloc_context (gc_alloc_context* acontext, void* param) { fix_alloc_context_args* args = (fix_alloc_context_args*)param; g_theGCHeap->FixAllocContext(acontext, (void*)(size_t)(args->for_gc_p), args->heap); } void gc_heap::fix_allocation_contexts (BOOL for_gc_p) { fix_alloc_context_args args; args.for_gc_p = for_gc_p; args.heap = __this; GCToEEInterface::GcEnumAllocContexts(fix_alloc_context, &args); fix_youngest_allocation_area(); } void gc_heap::fix_older_allocation_area (generation* older_gen) { heap_segment* older_gen_seg = generation_allocation_segment (older_gen); if (generation_allocation_limit (older_gen) != heap_segment_plan_allocated (older_gen_seg)) { uint8_t* point = generation_allocation_pointer (older_gen); size_t size = (generation_allocation_limit (older_gen) - generation_allocation_pointer (older_gen)); if (size != 0) { assert ((size >= Align (min_obj_size))); dprintf(3,("Making unused area [%Ix, %Ix[", (size_t)point, (size_t)point+size)); make_unused_array (point, size); if (size >= min_free_list) { generation_allocator (older_gen)->thread_item_front (point, size); add_gen_free (older_gen->gen_num, size); generation_free_list_space (older_gen) += size; } else { generation_free_obj_space (older_gen) += size; } } } else { assert (older_gen_seg != ephemeral_heap_segment); heap_segment_plan_allocated (older_gen_seg) = generation_allocation_pointer (older_gen); generation_allocation_limit (older_gen) = generation_allocation_pointer (older_gen); } generation_allocation_pointer (older_gen) = 0; generation_allocation_limit (older_gen) = 0; } void gc_heap::set_allocation_heap_segment (generation* gen) { #ifdef USE_REGIONS heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); dprintf (REGIONS_LOG, ("set gen%d alloc seg to start seg %Ix", gen->gen_num, heap_segment_mem (seg))); #else uint8_t* p = generation_allocation_start (gen); assert (p); heap_segment* seg = generation_allocation_segment (gen); if (in_range_for_segment (p, seg)) return; // try ephemeral heap segment in case of heap expansion seg = ephemeral_heap_segment; if (!in_range_for_segment (p, seg)) { seg = heap_segment_rw (generation_start_segment (gen)); PREFIX_ASSUME(seg != NULL); while (!in_range_for_segment (p, seg)) { seg = heap_segment_next_rw (seg); PREFIX_ASSUME(seg != NULL); } } #endif //USE_REGIONS generation_allocation_segment (gen) = seg; } void gc_heap::reset_allocation_pointers (generation* gen, uint8_t* start) { assert (start); assert (Align ((size_t)start) == (size_t)start); #ifndef USE_REGIONS generation_allocation_start (gen) = start; #endif //!USE_REGIONS generation_allocation_pointer (gen) = 0;//start + Align (min_obj_size); generation_allocation_limit (gen) = 0;//generation_allocation_pointer (gen); set_allocation_heap_segment (gen); } bool gc_heap::new_allocation_allowed (int gen_number) { if (dd_new_allocation (dynamic_data_of (gen_number)) < 0) { if (gen_number != 0) { // For UOH we will give it more budget before we try a GC. 
if (settings.concurrent) { dynamic_data* dd2 = dynamic_data_of (gen_number); if (dd_new_allocation (dd2) <= (ptrdiff_t)(-2 * dd_desired_allocation (dd2))) { return TRUE; } } } return FALSE; } #ifndef MULTIPLE_HEAPS else if ((settings.pause_mode != pause_no_gc) && (gen_number == 0)) { dynamic_data* dd0 = dynamic_data_of (0); dprintf (3, ("evaluating, running amount %Id - new %Id = %Id", allocation_running_amount, dd_new_allocation (dd0), (allocation_running_amount - dd_new_allocation (dd0)))); if ((allocation_running_amount - dd_new_allocation (dd0)) > dd_min_size (dd0)) { uint32_t ctime = GCToOSInterface::GetLowPrecisionTimeStamp(); if ((ctime - allocation_running_time) > 1000) { dprintf (2, (">1s since last gen0 gc")); return FALSE; } else { allocation_running_amount = dd_new_allocation (dd0); } } } #endif //MULTIPLE_HEAPS return TRUE; } inline ptrdiff_t gc_heap::get_desired_allocation (int gen_number) { return dd_desired_allocation (dynamic_data_of (gen_number)); } inline ptrdiff_t gc_heap::get_new_allocation (int gen_number) { return dd_new_allocation (dynamic_data_of (gen_number)); } //return the amount allocated so far in gen_number inline ptrdiff_t gc_heap::get_allocation (int gen_number) { dynamic_data* dd = dynamic_data_of (gen_number); return dd_desired_allocation (dd) - dd_new_allocation (dd); } inline BOOL grow_mark_stack (mark*& m, size_t& len, size_t init_len) { size_t new_size = max (init_len, 2*len); mark* tmp = new (nothrow) mark [new_size]; if (tmp) { memcpy (tmp, m, len * sizeof (mark)); delete m; m = tmp; len = new_size; return TRUE; } else { dprintf (1, ("Failed to allocate %Id bytes for mark stack", (len * sizeof (mark)))); return FALSE; } } inline uint8_t* pinned_plug (mark* m) { return m->first; } inline size_t& pinned_len (mark* m) { return m->len; } inline void set_new_pin_info (mark* m, uint8_t* pin_free_space_start) { m->len = pinned_plug (m) - pin_free_space_start; #ifdef SHORT_PLUGS m->allocation_context_start_region = pin_free_space_start; #endif //SHORT_PLUGS } #ifdef SHORT_PLUGS inline uint8_t*& pin_allocation_context_start_region (mark* m) { return m->allocation_context_start_region; } uint8_t* get_plug_start_in_saved (uint8_t* old_loc, mark* pinned_plug_entry) { uint8_t* saved_pre_plug_info = (uint8_t*)(pinned_plug_entry->get_pre_plug_reloc_info()); uint8_t* plug_start_in_saved = saved_pre_plug_info + (old_loc - (pinned_plug (pinned_plug_entry) - sizeof (plug_and_gap))); //dprintf (1, ("detected a very short plug: %Ix before PP %Ix, pad %Ix", // old_loc, pinned_plug (pinned_plug_entry), plug_start_in_saved)); dprintf (1, ("EP: %Ix(%Ix), %Ix", old_loc, pinned_plug (pinned_plug_entry), plug_start_in_saved)); return plug_start_in_saved; } inline void set_padding_in_expand (uint8_t* old_loc, BOOL set_padding_on_saved_p, mark* pinned_plug_entry) { if (set_padding_on_saved_p) { set_plug_padded (get_plug_start_in_saved (old_loc, pinned_plug_entry)); } else { set_plug_padded (old_loc); } } inline void clear_padding_in_expand (uint8_t* old_loc, BOOL set_padding_on_saved_p, mark* pinned_plug_entry) { if (set_padding_on_saved_p) { clear_plug_padded (get_plug_start_in_saved (old_loc, pinned_plug_entry)); } else { clear_plug_padded (old_loc); } } #endif //SHORT_PLUGS void gc_heap::reset_pinned_queue() { mark_stack_tos = 0; mark_stack_bos = 0; } void gc_heap::reset_pinned_queue_bos() { mark_stack_bos = 0; } // last_pinned_plug is only for asserting purpose. 
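// merge_with_last_pinned_plug grows the most recently queued pin by plug_size; if that pin
// had post-plug info saved, the saved bytes are copied back first because the artificial gap
// is no longer at the end of the (now longer) plug.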
void gc_heap::merge_with_last_pinned_plug (uint8_t* last_pinned_plug, size_t plug_size) { if (last_pinned_plug) { mark& last_m = mark_stack_array[mark_stack_tos - 1]; assert (last_pinned_plug == last_m.first); if (last_m.saved_post_p) { last_m.saved_post_p = FALSE; dprintf (3, ("setting last plug %Ix post to false", last_m.first)); // We need to recover what the gap has overwritten. memcpy ((last_m.first + last_m.len - sizeof (plug_and_gap)), &(last_m.saved_post_plug), sizeof (gap_reloc_pair)); } last_m.len += plug_size; dprintf (3, ("recovered the last part of plug %Ix, setting its plug size to %Ix", last_m.first, last_m.len)); } } void gc_heap::set_allocator_next_pin (generation* gen) { dprintf (3, ("SANP: gen%d, ptr; %Ix, limit: %Ix", gen->gen_num, generation_allocation_pointer (gen), generation_allocation_limit (gen))); if (!(pinned_plug_que_empty_p())) { mark* oldest_entry = oldest_pin(); uint8_t* plug = pinned_plug (oldest_entry); if ((plug >= generation_allocation_pointer (gen)) && (plug < generation_allocation_limit (gen))) { #ifdef USE_REGIONS assert (region_of (generation_allocation_pointer (gen)) == region_of (generation_allocation_limit (gen) - 1)); #endif //USE_REGIONS generation_allocation_limit (gen) = pinned_plug (oldest_entry); dprintf (3, ("SANP: get next pin free space in gen%d for alloc: %Ix->%Ix(%Id)", gen->gen_num, generation_allocation_pointer (gen), generation_allocation_limit (gen), (generation_allocation_limit (gen) - generation_allocation_pointer (gen)))); } else assert (!((plug < generation_allocation_pointer (gen)) && (plug >= heap_segment_mem (generation_allocation_segment (gen))))); } } // After we set the info, we increase tos. void gc_heap::set_pinned_info (uint8_t* last_pinned_plug, size_t plug_len, generation* gen) { #ifndef _DEBUG UNREFERENCED_PARAMETER(last_pinned_plug); #endif //_DEBUG mark& m = mark_stack_array[mark_stack_tos]; assert (m.first == last_pinned_plug); m.len = plug_len; mark_stack_tos++; assert (gen != 0); // Why are we checking here? gen is never 0. if (gen != 0) { set_allocator_next_pin (gen); } } size_t gc_heap::deque_pinned_plug () { size_t m = mark_stack_bos; dprintf (3, ("deque: %Id->%Ix", mark_stack_bos, pinned_plug (pinned_plug_of (m)))); mark_stack_bos++; return m; } inline mark* gc_heap::pinned_plug_of (size_t bos) { return &mark_stack_array [ bos ]; } inline mark* gc_heap::oldest_pin () { return pinned_plug_of (mark_stack_bos); } inline BOOL gc_heap::pinned_plug_que_empty_p () { return (mark_stack_bos == mark_stack_tos); } inline mark* gc_heap::before_oldest_pin() { if (mark_stack_bos >= 1) return pinned_plug_of (mark_stack_bos-1); else return 0; } inline BOOL gc_heap::ephemeral_pointer_p (uint8_t* o) { #ifdef USE_REGIONS int gen_num = object_gennum ((uint8_t*)o); assert (gen_num >= 0); return (gen_num < max_generation); #else return ((o >= ephemeral_low) && (o < ephemeral_high)); #endif //USE_REGIONS } #ifdef USE_REGIONS // This assumes o is guaranteed to be in a region. inline bool gc_heap::is_in_condemned_gc (uint8_t* o) { assert ((o >= g_gc_lowest_address) && (o < g_gc_highest_address)); int condemned_gen = settings.condemned_generation; if (condemned_gen < max_generation) { int gen = get_region_gen_num (o); if (gen > condemned_gen) { return false; } } return true; } // REGIONS TODO - // This method can be called by GCHeap::Promote/Relocate which means // it could be in the heap range but not actually in a valid region. // This would return true but find_object will return 0. 
But this // seems counter-intuitive so we should consider a better implementation. inline bool gc_heap::is_in_condemned (uint8_t* o) { if ((o >= g_gc_lowest_address) && (o < g_gc_highest_address)) return is_in_condemned_gc (o); else return false; } inline bool gc_heap::should_check_brick_for_reloc (uint8_t* o) { assert ((o >= g_gc_lowest_address) && (o < g_gc_highest_address)); int condemned_gen = settings.condemned_generation; if (condemned_gen < max_generation) { heap_segment* region = region_of (o); int gen = get_region_gen_num (region); if ((gen > condemned_gen) || (heap_segment_swept_in_plan (region))) { if (heap_segment_swept_in_plan (region)) { dprintf (4444, ("-Rsip %Ix", o)); } return false; } } else if (heap_segment_swept_in_plan (region_of (o))) { return false; } return true; } #endif //USE_REGIONS #ifdef MH_SC_MARK inline int& gc_heap::mark_stack_busy() { return g_mark_stack_busy [(heap_number+2)*HS_CACHE_LINE_SIZE/sizeof(int)]; } #endif //MH_SC_MARK void gc_heap::make_mark_stack (mark* arr) { reset_pinned_queue(); mark_stack_array = arr; mark_stack_array_length = MARK_STACK_INITIAL_LENGTH; #ifdef MH_SC_MARK mark_stack_busy() = 0; #endif //MH_SC_MARK } #ifdef BACKGROUND_GC inline size_t& gc_heap::bpromoted_bytes(int thread) { #ifdef MULTIPLE_HEAPS return g_bpromoted [thread*16]; #else //MULTIPLE_HEAPS UNREFERENCED_PARAMETER(thread); return g_bpromoted; #endif //MULTIPLE_HEAPS } void gc_heap::make_background_mark_stack (uint8_t** arr) { background_mark_stack_array = arr; background_mark_stack_array_length = MARK_STACK_INITIAL_LENGTH; background_mark_stack_tos = arr; } void gc_heap::make_c_mark_list (uint8_t** arr) { c_mark_list = arr; c_mark_list_index = 0; c_mark_list_length = 1 + (OS_PAGE_SIZE / MIN_OBJECT_SIZE); } #endif //BACKGROUND_GC #ifdef CARD_BUNDLE // The card bundle keeps track of groups of card words. static const size_t card_bundle_word_width = 32; // How do we express the fact that 32 bits (card_word_width) is one uint32_t? 
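// Worked example, with 64-bit values assumed (card_size 256, card_word_width 32,
// GC_PAGE_SIZE 4096): card_bundle_size = 4096 / (4 * 32) = 32 card words per bundle, so one
// bundle bit covers 32 * 32 = 1024 cards (256KB of heap) and one 32-bit bundle word covers
// 8MB, matching cbw_span in size_card_bundle_of below.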
static const size_t card_bundle_size = (size_t)(GC_PAGE_SIZE / (sizeof(uint32_t)*card_bundle_word_width)); inline size_t card_bundle_word (size_t cardb) { return cardb / card_bundle_word_width; } inline uint32_t card_bundle_bit (size_t cardb) { return (uint32_t)(cardb % card_bundle_word_width); } size_t align_cardw_on_bundle (size_t cardw) { return ((size_t)(cardw + card_bundle_size - 1) & ~(card_bundle_size - 1 )); } // Get the card bundle representing a card word size_t cardw_card_bundle (size_t cardw) { return cardw / card_bundle_size; } // Get the first card word in a card bundle size_t card_bundle_cardw (size_t cardb) { return cardb * card_bundle_size; } // Clear the specified card bundle void gc_heap::card_bundle_clear (size_t cardb) { uint32_t bit = (uint32_t)(1 << card_bundle_bit (cardb)); uint32_t* bundle = &card_bundle_table[card_bundle_word (cardb)]; #ifdef MULTIPLE_HEAPS // card bundles may straddle segments and heaps, thus bits may be cleared concurrently if ((*bundle & bit) != 0) { Interlocked::And (bundle, ~bit); } #else *bundle &= ~bit; #endif // check for races assert ((*bundle & bit) == 0); dprintf (2, ("Cleared card bundle %Ix [%Ix, %Ix[", cardb, (size_t)card_bundle_cardw (cardb), (size_t)card_bundle_cardw (cardb+1))); } inline void set_bundle_bits (uint32_t* bundle, uint32_t bits) { #ifdef MULTIPLE_HEAPS // card bundles may straddle segments and heaps, thus bits may be set concurrently if ((*bundle & bits) != bits) { Interlocked::Or (bundle, bits); } #else *bundle |= bits; #endif // check for races assert ((*bundle & bits) == bits); } void gc_heap::card_bundle_set (size_t cardb) { uint32_t bits = (1 << card_bundle_bit (cardb)); set_bundle_bits (&card_bundle_table [card_bundle_word (cardb)], bits); } // Set the card bundle bits between start_cardb and end_cardb void gc_heap::card_bundles_set (size_t start_cardb, size_t end_cardb) { if (start_cardb == end_cardb) { card_bundle_set(start_cardb); return; } size_t start_word = card_bundle_word (start_cardb); size_t end_word = card_bundle_word (end_cardb); if (start_word < end_word) { // Set the partial words uint32_t bits = highbits (~0u, card_bundle_bit (start_cardb)); set_bundle_bits (&card_bundle_table [start_word], bits); if (card_bundle_bit (end_cardb)) { bits = lowbits (~0u, card_bundle_bit (end_cardb)); set_bundle_bits (&card_bundle_table [end_word], bits); } // Set the full words for (size_t i = start_word + 1; i < end_word; i++) { card_bundle_table [i] = ~0u; } } else { uint32_t bits = (highbits (~0u, card_bundle_bit (start_cardb)) & lowbits (~0u, card_bundle_bit (end_cardb))); set_bundle_bits (&card_bundle_table [start_word], bits); } } // Indicates whether the specified bundle is set. 
BOOL gc_heap::card_bundle_set_p (size_t cardb) { return (card_bundle_table[card_bundle_word(cardb)] & (1 << card_bundle_bit (cardb))); } // Returns the size (in bytes) of a card bundle representing the region from 'from' to 'end' size_t size_card_bundle_of (uint8_t* from, uint8_t* end) { // Number of heap bytes represented by a card bundle word size_t cbw_span = card_size * card_word_width * card_bundle_size * card_bundle_word_width; // Align the start of the region down from = (uint8_t*)((size_t)from & ~(cbw_span - 1)); // Align the end of the region up end = (uint8_t*)((size_t)(end + (cbw_span - 1)) & ~(cbw_span - 1)); // Make sure they're really aligned assert (((size_t)from & (cbw_span - 1)) == 0); assert (((size_t)end & (cbw_span - 1)) == 0); return ((end - from) / cbw_span) * sizeof (uint32_t); } // Takes a pointer to a card bundle table and an address, and returns a pointer that represents // where a theoretical card bundle table that represents every address (starting from 0) would // start if the bundle word representing the address were to be located at the pointer passed in. // The returned 'translated' pointer makes it convenient/fast to calculate where the card bundle // for a given address is using a simple shift operation on the address. uint32_t* translate_card_bundle_table (uint32_t* cb, uint8_t* lowest_address) { // The number of bytes of heap memory represented by a card bundle word const size_t heap_bytes_for_bundle_word = card_size * card_word_width * card_bundle_size * card_bundle_word_width; // Each card bundle word is 32 bits return (uint32_t*)((uint8_t*)cb - (((size_t)lowest_address / heap_bytes_for_bundle_word) * sizeof (uint32_t))); } void gc_heap::enable_card_bundles () { if (can_use_write_watch_for_card_table() && (!card_bundles_enabled())) { dprintf (1, ("Enabling card bundles")); // We initially set all of the card bundles card_bundles_set (cardw_card_bundle (card_word (card_of (lowest_address))), cardw_card_bundle (align_cardw_on_bundle (card_word (card_of (highest_address))))); settings.card_bundles = TRUE; } } BOOL gc_heap::card_bundles_enabled () { return settings.card_bundles; } #endif // CARD_BUNDLE #if defined (HOST_64BIT) #define brick_size ((size_t)4096) #else #define brick_size ((size_t)2048) #endif //HOST_64BIT inline size_t gc_heap::brick_of (uint8_t* add) { return (size_t)(add - lowest_address) / brick_size; } inline uint8_t* gc_heap::brick_address (size_t brick) { return lowest_address + (brick_size * brick); } void gc_heap::clear_brick_table (uint8_t* from, uint8_t* end) { size_t from_brick = brick_of (from); size_t end_brick = brick_of (end); memset (&brick_table[from_brick], 0, sizeof(brick_table[from_brick])*(end_brick-from_brick)); } //codes for the brick entries: //entry == 0 -> not assigned //entry >0 offset is entry-1 //entry <0 jump back entry bricks inline void gc_heap::set_brick (size_t index, ptrdiff_t val) { if (val < -32767) { val = -32767; } assert (val < 32767); if (val >= 0) brick_table [index] = (short)val+1; else brick_table [index] = (short)val; dprintf (3, ("set brick[%Ix] to %d\n", index, (short)val)); } inline int gc_heap::get_brick_entry (size_t index) { #ifdef MULTIPLE_HEAPS return VolatileLoadWithoutBarrier(&brick_table [index]); #else return brick_table[index]; #endif } inline uint8_t* align_on_brick (uint8_t* add) { return (uint8_t*)((size_t)(add + brick_size - 1) & ~(brick_size - 1)); } inline uint8_t* align_lower_brick (uint8_t* add) { return (uint8_t*)(((size_t)add) & ~(brick_size - 1)); } size_t size_brick_of 
(uint8_t* from, uint8_t* end) { assert (((size_t)from & (brick_size-1)) == 0); assert (((size_t)end & (brick_size-1)) == 0); return ((end - from) / brick_size) * sizeof (short); } inline uint8_t* gc_heap::card_address (size_t card) { return (uint8_t*) (card_size * card); } inline size_t gc_heap::card_of ( uint8_t* object) { return (size_t)(object) / card_size; } inline uint8_t* align_on_card (uint8_t* add) { return (uint8_t*)((size_t)(add + card_size - 1) & ~(card_size - 1 )); } inline uint8_t* align_on_card_word (uint8_t* add) { return (uint8_t*) ((size_t)(add + (card_size*card_word_width)-1) & ~(card_size*card_word_width - 1)); } inline uint8_t* align_lower_card (uint8_t* add) { return (uint8_t*)((size_t)add & ~(card_size-1)); } inline void gc_heap::clear_card (size_t card) { card_table [card_word (card)] = (card_table [card_word (card)] & ~(1 << card_bit (card))); dprintf (3,("Cleared card %Ix [%Ix, %Ix[", card, (size_t)card_address (card), (size_t)card_address (card+1))); } inline void gc_heap::set_card (size_t card) { size_t word = card_word (card); card_table[word] = (card_table [word] | (1 << card_bit (card))); #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES // Also set the card bundle that corresponds to the card size_t bundle_to_set = cardw_card_bundle(word); card_bundle_set(bundle_to_set); dprintf (3,("Set card %Ix [%Ix, %Ix[ and bundle %Ix", card, (size_t)card_address (card), (size_t)card_address (card+1), bundle_to_set)); #endif } inline BOOL gc_heap::card_set_p (size_t card) { return ( card_table [ card_word (card) ] & (1 << card_bit (card))); } // Returns the number of DWORDs in the card table that cover the // range of addresses [from, end[. size_t count_card_of (uint8_t* from, uint8_t* end) { return card_word (gcard_of (end - 1)) - card_word (gcard_of (from)) + 1; } // Returns the number of bytes to allocate for a card table // that covers the range of addresses [from, end[. size_t size_card_of (uint8_t* from, uint8_t* end) { return count_card_of (from, end) * sizeof(uint32_t); } // We don't store seg_mapping_table in card_table_info because there's only always one view. class card_table_info { public: unsigned recount; uint8_t* lowest_address; uint8_t* highest_address; short* brick_table; #ifdef CARD_BUNDLE uint32_t* card_bundle_table; #endif //CARD_BUNDLE // mark_array is always at the end of the data structure because we // want to be able to make one commit call for everything before it. 
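    // (The ordering referred to above is the one get_card_table_element_layout
    // produces further below: card table, brick table, card bundle table, software
    // write watch table, seg mapping table and, last, the mark array. The
    // segment-based paths (make_card_table / grow_brick_card_tables) commit
    // everything up to and including the seg mapping table with a single
    // virtual_commit, while mark array pages are committed lazily, per segment,
    // for background GC.)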
#ifdef BACKGROUND_GC uint32_t* mark_array; #endif //BACKGROUND_GC size_t size; uint32_t* next_card_table; }; //These are accessors on untranslated cardtable inline unsigned& card_table_refcount (uint32_t* c_table) { return *(unsigned*)((char*)c_table - sizeof (card_table_info)); } inline uint8_t*& card_table_lowest_address (uint32_t* c_table) { return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->lowest_address; } uint32_t* translate_card_table (uint32_t* ct) { return (uint32_t*)((uint8_t*)ct - card_word (gcard_of (card_table_lowest_address (ct))) * sizeof(uint32_t)); } inline uint8_t*& card_table_highest_address (uint32_t* c_table) { return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->highest_address; } inline short*& card_table_brick_table (uint32_t* c_table) { return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->brick_table; } #ifdef CARD_BUNDLE inline uint32_t*& card_table_card_bundle_table (uint32_t* c_table) { return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->card_bundle_table; } #endif //CARD_BUNDLE #ifdef BACKGROUND_GC inline uint32_t*& card_table_mark_array (uint32_t* c_table) { return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->mark_array; } #ifdef HOST_64BIT #define mark_bit_pitch ((size_t)16) #else #define mark_bit_pitch ((size_t)8) #endif // HOST_64BIT #define mark_word_width ((size_t)32) #define mark_word_size (mark_word_width * mark_bit_pitch) inline uint8_t* align_on_mark_bit (uint8_t* add) { return (uint8_t*)((size_t)(add + (mark_bit_pitch - 1)) & ~(mark_bit_pitch - 1)); } inline uint8_t* align_lower_mark_bit (uint8_t* add) { return (uint8_t*)((size_t)(add) & ~(mark_bit_pitch - 1)); } inline BOOL is_aligned_on_mark_word (uint8_t* add) { return ((size_t)add == ((size_t)(add) & ~(mark_word_size - 1))); } inline uint8_t* align_on_mark_word (uint8_t* add) { return (uint8_t*)((size_t)(add + mark_word_size - 1) & ~(mark_word_size - 1)); } inline uint8_t* align_lower_mark_word (uint8_t* add) { return (uint8_t*)((size_t)(add) & ~(mark_word_size - 1)); } inline size_t mark_bit_of (uint8_t* add) { return ((size_t)add / mark_bit_pitch); } inline unsigned int mark_bit_bit (size_t mark_bit) { return (unsigned int)(mark_bit % mark_word_width); } inline size_t mark_bit_word (size_t mark_bit) { return (mark_bit / mark_word_width); } inline size_t mark_word_of (uint8_t* add) { return ((size_t)add) / mark_word_size; } uint8_t* mark_word_address (size_t wd) { return (uint8_t*)(wd*mark_word_size); } uint8_t* mark_bit_address (size_t mark_bit) { return (uint8_t*)(mark_bit*mark_bit_pitch); } inline size_t mark_bit_bit_of (uint8_t* add) { return (((size_t)add / mark_bit_pitch) % mark_word_width); } inline unsigned int gc_heap::mark_array_marked(uint8_t* add) { return mark_array [mark_word_of (add)] & (1 << mark_bit_bit_of (add)); } inline BOOL gc_heap::is_mark_bit_set (uint8_t* add) { return (mark_array [mark_word_of (add)] & (1 << mark_bit_bit_of (add))); } inline void gc_heap::mark_array_set_marked (uint8_t* add) { size_t index = mark_word_of (add); uint32_t val = (1 << mark_bit_bit_of (add)); #ifdef MULTIPLE_HEAPS Interlocked::Or (&(mark_array [index]), val); #else mark_array [index] |= val; #endif } inline void gc_heap::mark_array_clear_marked (uint8_t* add) { mark_array [mark_word_of (add)] &= ~(1 << mark_bit_bit_of (add)); } size_t size_mark_array_of (uint8_t* from, uint8_t* end) { assert (((size_t)from & ((mark_word_size)-1)) == 0); assert (((size_t)end & 
((mark_word_size)-1)) == 0); return sizeof (uint32_t)*(((end - from) / mark_word_size)); } //In order to eliminate the lowest_address in the mark array //computations (mark_word_of, etc) mark_array is offset // according to the lowest_address. uint32_t* translate_mark_array (uint32_t* ma) { return (uint32_t*)((uint8_t*)ma - size_mark_array_of (0, g_gc_lowest_address)); } // from and end must be page aligned addresses. void gc_heap::clear_mark_array (uint8_t* from, uint8_t* end, BOOL check_only/*=TRUE*/ #ifdef FEATURE_BASICFREEZE , BOOL read_only/*=FALSE*/ #endif // FEATURE_BASICFREEZE ) { if(!gc_can_use_concurrent) return; #ifdef FEATURE_BASICFREEZE if (!read_only) #endif // FEATURE_BASICFREEZE { assert (from == align_on_mark_word (from)); } assert (end == align_on_mark_word (end)); uint8_t* current_lowest_address = background_saved_lowest_address; uint8_t* current_highest_address = background_saved_highest_address; //there is a possibility of the addresses to be //outside of the covered range because of a newly allocated //large object segment if ((end <= current_highest_address) && (from >= current_lowest_address)) { size_t beg_word = mark_word_of (align_on_mark_word (from)); //align end word to make sure to cover the address size_t end_word = mark_word_of (align_on_mark_word (end)); dprintf (3, ("Calling clearing mark array [%Ix, %Ix[ for addresses [%Ix, %Ix[(%s)", (size_t)mark_word_address (beg_word), (size_t)mark_word_address (end_word), (size_t)from, (size_t)end, (check_only ? "check_only" : "clear"))); if (!check_only) { uint8_t* op = from; while (op < mark_word_address (beg_word)) { mark_array_clear_marked (op); op += mark_bit_pitch; } memset (&mark_array[beg_word], 0, (end_word - beg_word)*sizeof (uint32_t)); } #ifdef _DEBUG else { //Beware, it is assumed that the mark array word straddling //start has been cleared before //verify that the array is empty. 
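            // (The checks below assert that every whole mark word in
            // [markw, markw_end) is zero, then walk any remaining addresses up to
            // 'end' one at a time via mark_array_marked.)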
size_t markw = mark_word_of (align_on_mark_word (from)); size_t markw_end = mark_word_of (align_on_mark_word (end)); while (markw < markw_end) { assert (!(mark_array [markw])); markw++; } uint8_t* p = mark_word_address (markw_end); while (p < end) { assert (!(mark_array_marked (p))); p++; } } #endif //_DEBUG } } #endif //BACKGROUND_GC //These work on untranslated card tables inline uint32_t*& card_table_next (uint32_t* c_table) { return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->next_card_table; } inline size_t& card_table_size (uint32_t* c_table) { return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->size; } void own_card_table (uint32_t* c_table) { card_table_refcount (c_table) += 1; } void destroy_card_table (uint32_t* c_table); void delete_next_card_table (uint32_t* c_table) { uint32_t* n_table = card_table_next (c_table); if (n_table) { if (card_table_next (n_table)) { delete_next_card_table (n_table); } if (card_table_refcount (n_table) == 0) { destroy_card_table (n_table); card_table_next (c_table) = 0; } } } void release_card_table (uint32_t* c_table) { assert (card_table_refcount (c_table) >0); card_table_refcount (c_table) -= 1; if (card_table_refcount (c_table) == 0) { delete_next_card_table (c_table); if (card_table_next (c_table) == 0) { destroy_card_table (c_table); // sever the link from the parent if (&g_gc_card_table[card_word (gcard_of(g_gc_lowest_address))] == c_table) { g_gc_card_table = 0; #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES g_gc_card_bundle_table = 0; #endif #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP SoftwareWriteWatch::StaticClose(); #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP } else { uint32_t* p_table = &g_gc_card_table[card_word (gcard_of(g_gc_lowest_address))]; if (p_table) { while (p_table && (card_table_next (p_table) != c_table)) p_table = card_table_next (p_table); card_table_next (p_table) = 0; } } } } } void destroy_card_table (uint32_t* c_table) { // delete (uint32_t*)&card_table_refcount(c_table); GCToOSInterface::VirtualRelease (&card_table_refcount(c_table), card_table_size(c_table)); dprintf (2, ("Table Virtual Free : %Ix", (size_t)&card_table_refcount(c_table))); } void gc_heap::get_card_table_element_sizes (uint8_t* start, uint8_t* end, size_t sizes[total_bookkeeping_elements]) { memset (sizes, 0, sizeof(size_t) * total_bookkeeping_elements); sizes[card_table_element] = size_card_of (start, end); sizes[brick_table_element] = size_brick_of (start, end); #ifdef CARD_BUNDLE if (can_use_write_watch_for_card_table()) { sizes[card_bundle_table_element] = size_card_bundle_of (start, end); } #endif //CARD_BUNDLE #if defined(FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP) && defined (BACKGROUND_GC) if (gc_can_use_concurrent) { sizes[software_write_watch_table_element] = SoftwareWriteWatch::GetTableByteSize(start, end); } #endif //FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP && BACKGROUND_GC sizes[seg_mapping_table_element] = size_seg_mapping_table_of (start, end); #ifdef BACKGROUND_GC if (gc_can_use_concurrent) { sizes[mark_array_element] = size_mark_array_of (start, end); } #endif //BACKGROUND_GC } void gc_heap::get_card_table_element_layout (uint8_t* start, uint8_t* end, size_t layout[total_bookkeeping_elements + 1]) { size_t sizes[total_bookkeeping_elements]; get_card_table_element_sizes(start, end, sizes); const size_t alignment[total_bookkeeping_elements + 1] = { sizeof (uint32_t), // card_table_element sizeof (short), // brick_table_element #ifdef CARD_BUNDLE sizeof (uint32_t), // 
card_bundle_table_element #endif //CARD_BUNDLE #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP sizeof(size_t), // software_write_watch_table_element #endif //FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP sizeof (uint8_t*), // seg_mapping_table_element #ifdef BACKGROUND_GC // In order to avoid a dependency between commit_mark_array_by_range and this logic, it is easier to make sure // pages for mark array never overlaps with pages in the seg mapping table. That way commit_mark_array_by_range // will never commit a page that is already committed here for the seg mapping table. OS_PAGE_SIZE, // mark_array_element #endif //BACKGROUND_GC // commit_mark_array_by_range extends the end pointer of the commit to the next page boundary, we better make sure it // is reserved OS_PAGE_SIZE // total_bookkeeping_elements }; layout[card_table_element] = ALIGN_UP(sizeof(card_table_info), alignment[card_table_element]); for (int element = brick_table_element; element <= total_bookkeeping_elements; element++) { layout[element] = layout[element - 1] + sizes[element - 1]; if ((element != total_bookkeeping_elements) && (sizes[element] != 0)) { layout[element] = ALIGN_UP(layout[element], alignment[element]); } } } #ifdef USE_REGIONS bool gc_heap::on_used_changed (uint8_t* new_used) { if (new_used > bookkeeping_covered_committed) { bool speculative_commit_tried = false; #ifdef STRESS_REGIONS if (gc_rand::get_rand(10) > 3) { dprintf (REGIONS_LOG, ("skipping speculative commit under stress regions")); speculative_commit_tried = true; } #endif while (true) { uint8_t* new_bookkeeping_covered_committed = nullptr; if (speculative_commit_tried) { new_bookkeeping_covered_committed = new_used; } else { uint64_t committed_size = (uint64_t)(bookkeeping_covered_committed - g_gc_lowest_address); uint64_t total_size = (uint64_t)(g_gc_highest_address - g_gc_lowest_address); assert (committed_size <= total_size); assert (committed_size < (UINT64_MAX / 2)); uint64_t new_committed_size = min(committed_size * 2, total_size); assert ((UINT64_MAX - new_committed_size) > (uint64_t)g_gc_lowest_address); uint8_t* double_commit = g_gc_lowest_address + new_committed_size; new_bookkeeping_covered_committed = max(double_commit, new_used); dprintf (REGIONS_LOG, ("committed_size = %Id", committed_size)); dprintf (REGIONS_LOG, ("total_size = %Id", total_size)); dprintf (REGIONS_LOG, ("new_committed_size = %Id", new_committed_size)); dprintf (REGIONS_LOG, ("double_commit = %p", double_commit)); } dprintf (REGIONS_LOG, ("bookkeeping_covered_committed = %p", bookkeeping_covered_committed)); dprintf (REGIONS_LOG, ("new_bookkeeping_covered_committed = %p", new_bookkeeping_covered_committed)); if (inplace_commit_card_table (bookkeeping_covered_committed, new_bookkeeping_covered_committed)) { bookkeeping_covered_committed = new_bookkeeping_covered_committed; break; } else { if (new_bookkeeping_covered_committed == new_used) { dprintf (REGIONS_LOG, ("The minimal commit for the GC bookkeepping data structure failed, giving up")); return false; } dprintf (REGIONS_LOG, ("The speculative commit for the GC bookkeepping data structure failed, retry for minimal commit")); speculative_commit_tried = true; } } } return true; } bool gc_heap::inplace_commit_card_table (uint8_t* from, uint8_t* to) { dprintf (REGIONS_LOG, ("inplace_commit_card_table(%p, %p), size = %Id", from, to, to - from)); uint8_t* start = g_gc_lowest_address; uint8_t* end = g_gc_highest_address; uint8_t* commit_begins[total_bookkeeping_elements]; size_t 
commit_sizes[total_bookkeeping_elements]; size_t new_sizes[total_bookkeeping_elements]; bool initial_commit = (from == start); bool additional_commit = !initial_commit && (to > from); if (initial_commit || additional_commit) { #ifdef DEBUG size_t offsets[total_bookkeeping_elements + 1]; get_card_table_element_layout(start, end, offsets); dprintf (REGIONS_LOG, ("layout")); for (int i = card_table_element; i <= total_bookkeeping_elements; i++) { assert (offsets[i] == card_table_element_layout[i]); dprintf (REGIONS_LOG, ("%Id", card_table_element_layout[i])); } #endif get_card_table_element_sizes (start, to, new_sizes); #ifdef DEBUG dprintf (REGIONS_LOG, ("new_sizes")); for (int i = card_table_element; i < total_bookkeeping_elements; i++) { dprintf (REGIONS_LOG, ("%Id", new_sizes[i])); } if (additional_commit) { size_t current_sizes[total_bookkeeping_elements]; get_card_table_element_sizes (start, from, current_sizes); dprintf (REGIONS_LOG, ("old_sizes")); for (int i = card_table_element; i < total_bookkeeping_elements; i++) { assert (current_sizes[i] == bookkeeping_sizes[i]); dprintf (REGIONS_LOG, ("%Id", bookkeeping_sizes[i])); } } #endif for (int i = card_table_element; i <= seg_mapping_table_element; i++) { uint8_t* required_begin = nullptr; uint8_t* required_end = nullptr; uint8_t* commit_begin = nullptr; uint8_t* commit_end = nullptr; if (initial_commit) { required_begin = bookkeeping_covered_start + ((i == card_table_element) ? 0 : card_table_element_layout[i]); required_end = bookkeeping_covered_start + card_table_element_layout[i] + new_sizes[i]; commit_begin = align_lower_page(required_begin); } else { assert (additional_commit); required_begin = bookkeeping_covered_start + card_table_element_layout[i] + bookkeeping_sizes[i]; required_end = required_begin + new_sizes[i] - bookkeeping_sizes[i]; commit_begin = align_on_page(required_begin); } assert (required_begin <= required_end); commit_end = align_on_page(required_end); commit_end = min (commit_end, align_lower_page(bookkeeping_covered_start + card_table_element_layout[i + 1])); commit_begin = min (commit_begin, commit_end); assert (commit_begin <= commit_end); dprintf (REGIONS_LOG, ("required = [%p, %p), size = %Id", required_begin, required_end, required_end - required_begin)); dprintf (REGIONS_LOG, ("commit = [%p, %p), size = %Id", commit_begin, commit_end, commit_end - commit_begin)); commit_begins[i] = commit_begin; commit_sizes[i] = (size_t)(commit_end - commit_begin); } dprintf (REGIONS_LOG, ("---------------------------------------")); } else { return true; } int failed_commit = -1; for (int i = card_table_element; i <= seg_mapping_table_element; i++) { bool succeed; if (commit_sizes[i] > 0) { succeed = virtual_commit (commit_begins[i], commit_sizes[i], gc_oh_num::none); if (!succeed) { failed_commit = i; break; } } } if (failed_commit == -1) { for (int i = card_table_element; i < total_bookkeeping_elements; i++) { bookkeeping_sizes[i] = new_sizes[i]; } } else { for (int i = card_table_element; i < failed_commit; i++) { bool succeed; if (commit_sizes[i] > 0) { succeed = virtual_decommit (commit_begins[i], commit_sizes[i], gc_oh_num::none); assert (succeed); } } return false; } return true; } #endif //USE_REGIONS uint32_t* gc_heap::make_card_table (uint8_t* start, uint8_t* end) { assert (g_gc_lowest_address == start); assert (g_gc_highest_address == end); uint32_t virtual_reserve_flags = VirtualReserveFlags::None; #ifdef CARD_BUNDLE if (can_use_write_watch_for_card_table()) { #ifndef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES // 
If we're not manually managing the card bundles, we will need to use OS write // watch APIs over this region to track changes. virtual_reserve_flags |= VirtualReserveFlags::WriteWatch; #endif } #endif //CARD_BUNDLE get_card_table_element_layout(start, end, card_table_element_layout); size_t alloc_size = card_table_element_layout[total_bookkeeping_elements]; uint8_t* mem = (uint8_t*)GCToOSInterface::VirtualReserve (alloc_size, 0, virtual_reserve_flags); #ifdef USE_REGIONS bookkeeping_covered_start = mem; #endif //USE_REGIONS if (!mem) return 0; dprintf (2, ("Init - Card table alloc for %Id bytes: [%Ix, %Ix[", alloc_size, (size_t)mem, (size_t)(mem+alloc_size))); #ifdef USE_REGIONS if (!inplace_commit_card_table (g_gc_lowest_address, global_region_allocator.get_left_used_unsafe())) { dprintf (1, ("Card table commit failed")); GCToOSInterface::VirtualRelease (mem, alloc_size); return 0; } bookkeeping_covered_committed = global_region_allocator.get_left_used_unsafe(); #else // in case of background gc, the mark array will be committed separately (per segment). size_t commit_size = card_table_element_layout[seg_mapping_table_element + 1]; if (!virtual_commit (mem, commit_size, gc_oh_num::none)) { dprintf (1, ("Card table commit failed")); GCToOSInterface::VirtualRelease (mem, alloc_size); return 0; } #endif //USE_REGIONS // initialize the ref count uint32_t* ct = (uint32_t*)(mem + card_table_element_layout[card_table_element]); card_table_refcount (ct) = 0; card_table_lowest_address (ct) = start; card_table_highest_address (ct) = end; card_table_brick_table (ct) = (short*)(mem + card_table_element_layout[brick_table_element]); card_table_size (ct) = alloc_size; card_table_next (ct) = 0; #ifdef CARD_BUNDLE card_table_card_bundle_table (ct) = (uint32_t*)(mem + card_table_element_layout[card_bundle_table_element]); #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES g_gc_card_bundle_table = translate_card_bundle_table(card_table_card_bundle_table(ct), g_gc_lowest_address); #endif #endif //CARD_BUNDLE #if defined(FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP) && defined (BACKGROUND_GC) if (gc_can_use_concurrent) { SoftwareWriteWatch::InitializeUntranslatedTable(mem + card_table_element_layout[software_write_watch_table_element], start); } #endif //FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP && BACKGROUND_GC seg_mapping_table = (seg_mapping*)(mem + card_table_element_layout[seg_mapping_table_element]); seg_mapping_table = (seg_mapping*)((uint8_t*)seg_mapping_table - size_seg_mapping_table_of (0, (align_lower_segment (g_gc_lowest_address)))); #ifdef BACKGROUND_GC if (gc_can_use_concurrent) card_table_mark_array (ct) = (uint32_t*)(mem + card_table_element_layout[mark_array_element]); else card_table_mark_array (ct) = NULL; #endif //BACKGROUND_GC return translate_card_table(ct); } void gc_heap::set_fgm_result (failure_get_memory f, size_t s, BOOL loh_p) { #ifdef MULTIPLE_HEAPS for (int hn = 0; hn < gc_heap::n_heaps; hn++) { gc_heap* hp = gc_heap::g_heaps [hn]; hp->fgm_result.set_fgm (f, s, loh_p); } #else //MULTIPLE_HEAPS fgm_result.set_fgm (f, s, loh_p); #endif //MULTIPLE_HEAPS } //returns 0 for success, -1 otherwise // We are doing all the decommitting here because we want to make sure we have // enough memory to do so - if we do this during copy_brick_card_table and // and fail to decommit it would make the failure case very complicated to // handle. This way we can waste some decommit if we call this multiple // times before the next FGC but it's easier to handle the failure case. 
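// Illustrative sketch (added example, not part of the GC and not called anywhere):
// grow_brick_card_tables below roughly doubles the tracked address span when a new
// segment falls outside [g_gc_lowest_address, g_gc_highest_address), switching to
// 100 GB increments once the span already exceeds 200 GB. The helper only restates
// that growth policy for a span measured in bytes, assuming the 64-bit behavior.
static inline uint64_t example_grown_span (uint64_t current_span)
{
    const uint64_t one_gb = (uint64_t)1024 * 1024 * 1024;
    if (current_span > 200 * one_gb)
    {
        // beyond 200 GB, grow linearly by 100 GB at a time
        return current_span + 100 * one_gb;
    }
    else
    {
        // otherwise double the span
        return current_span * 2;
    }
}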
int gc_heap::grow_brick_card_tables (uint8_t* start, uint8_t* end, size_t size, heap_segment* new_seg, gc_heap* hp, BOOL uoh_p) { uint8_t* la = g_gc_lowest_address; uint8_t* ha = g_gc_highest_address; uint8_t* saved_g_lowest_address = min (start, g_gc_lowest_address); uint8_t* saved_g_highest_address = max (end, g_gc_highest_address); seg_mapping* new_seg_mapping_table = nullptr; #ifdef BACKGROUND_GC // This value is only for logging purpose - it's not necessarily exactly what we // would commit for mark array but close enough for diagnostics purpose. size_t logging_ma_commit_size = size_mark_array_of (0, (uint8_t*)size); #endif //BACKGROUND_GC // See if the address is already covered if ((la != saved_g_lowest_address ) || (ha != saved_g_highest_address)) { { //modify the highest address so the span covered //is twice the previous one. uint8_t* top = (uint8_t*)0 + Align (GCToOSInterface::GetVirtualMemoryLimit()); // On non-Windows systems, we get only an approximate value that can possibly be // slightly lower than the saved_g_highest_address. // In such case, we set the top to the saved_g_highest_address so that the // card and brick tables always cover the whole new range. if (top < saved_g_highest_address) { top = saved_g_highest_address; } size_t ps = ha-la; #ifdef HOST_64BIT if (ps > (uint64_t)200*1024*1024*1024) ps += (uint64_t)100*1024*1024*1024; else #endif // HOST_64BIT ps *= 2; if (saved_g_lowest_address < g_gc_lowest_address) { if (ps > (size_t)g_gc_lowest_address) saved_g_lowest_address = (uint8_t*)(size_t)OS_PAGE_SIZE; else { assert (((size_t)g_gc_lowest_address - ps) >= OS_PAGE_SIZE); saved_g_lowest_address = min (saved_g_lowest_address, (g_gc_lowest_address - ps)); } } if (saved_g_highest_address > g_gc_highest_address) { saved_g_highest_address = max ((saved_g_lowest_address + ps), saved_g_highest_address); if (saved_g_highest_address > top) saved_g_highest_address = top; } } dprintf (GC_TABLE_LOG, ("Growing card table [%Ix, %Ix[", (size_t)saved_g_lowest_address, (size_t)saved_g_highest_address)); bool write_barrier_updated = false; uint32_t virtual_reserve_flags = VirtualReserveFlags::None; uint32_t* saved_g_card_table = g_gc_card_table; #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES uint32_t* saved_g_card_bundle_table = g_gc_card_bundle_table; #endif get_card_table_element_layout(saved_g_lowest_address, saved_g_highest_address, card_table_element_layout); size_t cb = 0; uint32_t* ct = 0; uint32_t* translated_ct = 0; #ifdef CARD_BUNDLE if (can_use_write_watch_for_card_table()) { cb = size_card_bundle_of (saved_g_lowest_address, saved_g_highest_address); #ifndef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES // If we're not manually managing the card bundles, we will need to use OS write // watch APIs over this region to track changes. virtual_reserve_flags |= VirtualReserveFlags::WriteWatch; #endif } #endif //CARD_BUNDLE size_t alloc_size = card_table_element_layout[total_bookkeeping_elements]; uint8_t* mem = (uint8_t*)GCToOSInterface::VirtualReserve (alloc_size, 0, virtual_reserve_flags); if (!mem) { set_fgm_result (fgm_grow_table, alloc_size, uoh_p); goto fail; } dprintf (GC_TABLE_LOG, ("Table alloc for %Id bytes: [%Ix, %Ix[", alloc_size, (size_t)mem, (size_t)((uint8_t*)mem+alloc_size))); { // in case of background gc, the mark array will be committed separately (per segment). 
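            // (commit_size therefore stops at the end of the seg mapping table; the
            // mark array pages that the layout places after it stay reserved until
            // commit_new_mark_array_global / commit_mark_array_new_seg commit them
            // below.)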
size_t commit_size = card_table_element_layout[seg_mapping_table_element + 1]; if (!virtual_commit (mem, commit_size, gc_oh_num::none)) { dprintf (GC_TABLE_LOG, ("Table commit failed")); set_fgm_result (fgm_commit_table, commit_size, uoh_p); goto fail; } } ct = (uint32_t*)(mem + card_table_element_layout[card_table_element]); card_table_refcount (ct) = 0; card_table_lowest_address (ct) = saved_g_lowest_address; card_table_highest_address (ct) = saved_g_highest_address; card_table_next (ct) = &g_gc_card_table[card_word (gcard_of (la))]; //clear the card table /* memclr ((uint8_t*)ct, (((saved_g_highest_address - saved_g_lowest_address)*sizeof (uint32_t) / (card_size * card_word_width)) + sizeof (uint32_t))); */ // No initialization needed, will be done in copy_brick_card card_table_brick_table (ct) = (short*)(mem + card_table_element_layout[brick_table_element]); #ifdef CARD_BUNDLE card_table_card_bundle_table (ct) = (uint32_t*)(mem + card_table_element_layout[card_bundle_table_element]); //set all bundle to look at all of the cards memset(card_table_card_bundle_table (ct), 0xFF, cb); #endif //CARD_BUNDLE new_seg_mapping_table = (seg_mapping*)(mem + card_table_element_layout[seg_mapping_table_element]); new_seg_mapping_table = (seg_mapping*)((uint8_t*)new_seg_mapping_table - size_seg_mapping_table_of (0, (align_lower_segment (saved_g_lowest_address)))); memcpy(&new_seg_mapping_table[seg_mapping_word_of(g_gc_lowest_address)], &seg_mapping_table[seg_mapping_word_of(g_gc_lowest_address)], size_seg_mapping_table_of(g_gc_lowest_address, g_gc_highest_address)); // new_seg_mapping_table gets assigned to seg_mapping_table at the bottom of this function, // not here. The reason for this is that, if we fail at mark array committing (OOM) and we've // already switched seg_mapping_table to point to the new mapping table, we'll decommit it and // run into trouble. By not assigning here, we're making sure that we will not change seg_mapping_table // if an OOM occurs. 
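        // (Concretely, seg_mapping_table is redirected to new_seg_mapping_table only
        // on the success paths further down, either under the software write watch
        // suspension or in the !write_barrier_updated case, after the mark array
        // commits have succeeded.)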
#ifdef BACKGROUND_GC if(gc_can_use_concurrent) card_table_mark_array (ct) = (uint32_t*)(mem + card_table_element_layout[mark_array_element]); else card_table_mark_array (ct) = NULL; #endif //BACKGROUND_GC translated_ct = translate_card_table (ct); dprintf (GC_TABLE_LOG, ("card table: %Ix(translated: %Ix), seg map: %Ix, mark array: %Ix", (size_t)ct, (size_t)translated_ct, (size_t)new_seg_mapping_table, (size_t)card_table_mark_array (ct))); #ifdef BACKGROUND_GC if (hp->is_bgc_in_progress()) { dprintf (GC_TABLE_LOG, ("new low: %Ix, new high: %Ix, latest mark array is %Ix(translate: %Ix)", saved_g_lowest_address, saved_g_highest_address, card_table_mark_array (ct), translate_mark_array (card_table_mark_array (ct)))); uint32_t* new_mark_array = (uint32_t*)((uint8_t*)card_table_mark_array (ct) - size_mark_array_of (0, saved_g_lowest_address)); if (!commit_new_mark_array_global (new_mark_array)) { dprintf (GC_TABLE_LOG, ("failed to commit portions in the mark array for existing segments")); set_fgm_result (fgm_commit_table, logging_ma_commit_size, uoh_p); goto fail; } if (!commit_mark_array_new_seg (hp, new_seg, translated_ct, saved_g_lowest_address)) { dprintf (GC_TABLE_LOG, ("failed to commit mark array for the new seg")); set_fgm_result (fgm_commit_table, logging_ma_commit_size, uoh_p); goto fail; } } else { clear_commit_flag_global(); } #endif //BACKGROUND_GC #if defined(FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP) && defined(BACKGROUND_GC) if (gc_can_use_concurrent) { // The current design of software write watch requires that the runtime is suspended during resize. Suspending // on resize is preferred because it is a far less frequent operation than GetWriteWatch() / ResetWriteWatch(). // Suspending here allows copying dirty state from the old table into the new table, and not have to merge old // table info lazily as done for card tables. // Either this thread was the thread that did the suspension which means we are suspended; or this is called // from a GC thread which means we are in a blocking GC and also suspended. bool is_runtime_suspended = GCToEEInterface::IsGCThread(); if (!is_runtime_suspended) { // Note on points where the runtime is suspended anywhere in this function. Upon an attempt to suspend the // runtime, a different thread may suspend first, causing this thread to block at the point of the suspend call. // So, at any suspend point, externally visible state needs to be consistent, as code that depends on that state // may run while this thread is blocked. This includes updates to g_gc_card_table, g_gc_lowest_address, and // g_gc_highest_address. suspend_EE(); } g_gc_card_table = translated_ct; #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES g_gc_card_bundle_table = translate_card_bundle_table(card_table_card_bundle_table(ct), saved_g_lowest_address); #endif SoftwareWriteWatch::SetResizedUntranslatedTable( mem + card_table_element_layout[software_write_watch_table_element], saved_g_lowest_address, saved_g_highest_address); seg_mapping_table = new_seg_mapping_table; // Since the runtime is already suspended, update the write barrier here as well. // This passes a bool telling whether we need to switch to the post // grow version of the write barrier. This test tells us if the new // segment was allocated at a lower address than the old, requiring // that we start doing an upper bounds check in the write barrier. 
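            // (The call below passes 'true' as its first argument because the runtime
            // is known to be suspended on this path, and la != saved_g_lowest_address
            // as the lower-address test described above.)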
g_gc_lowest_address = saved_g_lowest_address; g_gc_highest_address = saved_g_highest_address; stomp_write_barrier_resize(true, la != saved_g_lowest_address); write_barrier_updated = true; if (!is_runtime_suspended) { restart_EE(); } } else #endif //FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP && BACKGROUND_GC { g_gc_card_table = translated_ct; #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES g_gc_card_bundle_table = translate_card_bundle_table(card_table_card_bundle_table(ct), saved_g_lowest_address); #endif } if (!write_barrier_updated) { seg_mapping_table = new_seg_mapping_table; GCToOSInterface::FlushProcessWriteBuffers(); g_gc_lowest_address = saved_g_lowest_address; g_gc_highest_address = saved_g_highest_address; // This passes a bool telling whether we need to switch to the post // grow version of the write barrier. This test tells us if the new // segment was allocated at a lower address than the old, requiring // that we start doing an upper bounds check in the write barrier. // This will also suspend the runtime if the write barrier type needs // to be changed, so we are doing this after all global state has // been updated. See the comment above suspend_EE() above for more // info. stomp_write_barrier_resize(GCToEEInterface::IsGCThread(), la != saved_g_lowest_address); } return 0; fail: if (mem) { assert(g_gc_card_table == saved_g_card_table); #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES assert(g_gc_card_bundle_table == saved_g_card_bundle_table); #endif if (!GCToOSInterface::VirtualRelease (mem, alloc_size)) { dprintf (GC_TABLE_LOG, ("GCToOSInterface::VirtualRelease failed")); assert (!"release failed"); } } return -1; } else { #ifdef BACKGROUND_GC if (hp->is_bgc_in_progress()) { dprintf (GC_TABLE_LOG, ("in range new seg %Ix, mark_array is %Ix", new_seg, hp->mark_array)); if (!commit_mark_array_new_seg (hp, new_seg)) { dprintf (GC_TABLE_LOG, ("failed to commit mark array for the new seg in range")); set_fgm_result (fgm_commit_table, logging_ma_commit_size, uoh_p); return -1; } } #endif //BACKGROUND_GC } return 0; } //copy all of the arrays managed by the card table for a page aligned range void gc_heap::copy_brick_card_range (uint8_t* la, uint32_t* old_card_table, short* old_brick_table, uint8_t* start, uint8_t* end) { ptrdiff_t brick_offset = brick_of (start) - brick_of (la); dprintf (2, ("copying tables for range [%Ix %Ix[", (size_t)start, (size_t)end)); // copy brick table short* brick_start = &brick_table [brick_of (start)]; if (old_brick_table) { // segments are always on page boundaries memcpy (brick_start, &old_brick_table[brick_offset], size_brick_of (start, end)); } uint32_t* old_ct = &old_card_table[card_word (card_of (la))]; #ifdef BACKGROUND_GC if (gc_heap::background_running_p()) { uint32_t* old_mark_array = card_table_mark_array (old_ct); // We don't need to go through all the card tables here because // we only need to copy from the GC version of the mark array - when we // mark (even in allocate_uoh_object) we always use that mark array. 
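        // (The copy below is further restricted to the overlap of [start, end) with
        // [background_saved_lowest_address, background_saved_highest_address); only
        // that window can hold mark bits that need to be carried over.)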
if ((card_table_highest_address (old_ct) >= start) && (card_table_lowest_address (old_ct) <= end)) { if ((background_saved_highest_address >= start) && (background_saved_lowest_address <= end)) { //copy the mark bits // segments are always on page boundaries uint8_t* m_start = max (background_saved_lowest_address, start); uint8_t* m_end = min (background_saved_highest_address, end); memcpy (&mark_array[mark_word_of (m_start)], &old_mark_array[mark_word_of (m_start) - mark_word_of (la)], size_mark_array_of (m_start, m_end)); } } else { //only large segments can be out of range assert (old_brick_table == 0); } } #endif //BACKGROUND_GC // n way merge with all of the card table ever used in between uint32_t* ct = card_table_next (&card_table[card_word (card_of(lowest_address))]); assert (ct); while (card_table_next (old_ct) != ct) { //copy if old card table contained [start, end[ if ((card_table_highest_address (ct) >= end) && (card_table_lowest_address (ct) <= start)) { // or the card_tables size_t start_word = card_word (card_of (start)); uint32_t* dest = &card_table[start_word]; uint32_t* src = &((translate_card_table (ct))[start_word]); ptrdiff_t count = count_card_of (start, end); for (int x = 0; x < count; x++) { *dest |= *src; #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES if (*src != 0) { card_bundle_set(cardw_card_bundle(start_word+x)); } #endif dest++; src++; } } ct = card_table_next (ct); } } void gc_heap::copy_brick_card_table() { uint32_t* old_card_table = card_table; short* old_brick_table = brick_table; uint8_t* la = lowest_address; #ifdef _DEBUG uint8_t* ha = highest_address; assert (la == card_table_lowest_address (&old_card_table[card_word (card_of (la))])); assert (ha == card_table_highest_address (&old_card_table[card_word (card_of (la))])); #endif //_DEBUG /* todo: Need a global lock for this */ uint32_t* ct = &g_gc_card_table[card_word (gcard_of (g_gc_lowest_address))]; own_card_table (ct); card_table = translate_card_table (ct); /* End of global lock */ highest_address = card_table_highest_address (ct); lowest_address = card_table_lowest_address (ct); brick_table = card_table_brick_table (ct); #ifdef BACKGROUND_GC if (gc_can_use_concurrent) { mark_array = translate_mark_array (card_table_mark_array (ct)); assert (mark_word_of (g_gc_highest_address) == mark_word_of (align_on_mark_word (g_gc_highest_address))); } else mark_array = NULL; #endif //BACKGROUND_GC #ifdef CARD_BUNDLE card_bundle_table = translate_card_bundle_table (card_table_card_bundle_table (ct), g_gc_lowest_address); // Ensure that the word that represents g_gc_lowest_address in the translated table is located at the // start of the untranslated table. assert (&card_bundle_table [card_bundle_word (cardw_card_bundle (card_word (card_of (g_gc_lowest_address))))] == card_table_card_bundle_table (ct)); //set the card table if we are in a heap growth scenario if (card_bundles_enabled()) { card_bundles_set (cardw_card_bundle (card_word (card_of (lowest_address))), cardw_card_bundle (align_cardw_on_bundle (card_word (card_of (highest_address))))); } //check if we need to turn on card_bundles. 
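    // (This is a simple size threshold: the per-heap constant is scaled by the heap
    // count under MULTIPLE_HEAPS, and card bundles are enabled once the total
    // reserved memory reaches that threshold.)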
#ifdef MULTIPLE_HEAPS // use INT64 arithmetic here because of possible overflow on 32p uint64_t th = (uint64_t)MH_TH_CARD_BUNDLE*gc_heap::n_heaps; #else // use INT64 arithmetic here because of possible overflow on 32p uint64_t th = (uint64_t)SH_TH_CARD_BUNDLE; #endif //MULTIPLE_HEAPS if (reserved_memory >= th) { enable_card_bundles(); } #endif //CARD_BUNDLE // for each of the segments and heaps, copy the brick table and // or the card table for (int i = get_start_generation_index(); i < total_generation_count; i++) { heap_segment* seg = generation_start_segment (generation_of (i)); while (seg) { if (heap_segment_read_only_p (seg) && !heap_segment_in_range_p (seg)) { //check if it became in range if ((heap_segment_reserved (seg) > lowest_address) && (heap_segment_mem (seg) < highest_address)) { set_ro_segment_in_range (seg); } } else { uint8_t* end = align_on_page (heap_segment_allocated (seg)); copy_brick_card_range (la, old_card_table, (i < uoh_start_generation) ? old_brick_table : NULL, align_lower_page (heap_segment_mem (seg)), end); } seg = heap_segment_next (seg); } } release_card_table (&old_card_table[card_word (card_of(la))]); } #ifdef FEATURE_BASICFREEZE BOOL gc_heap::insert_ro_segment (heap_segment* seg) { #ifdef FEATURE_EVENT_TRACE if (!use_frozen_segments_p) use_frozen_segments_p = true; #endif //FEATURE_EVENT_TRACE enter_spin_lock (&gc_heap::gc_lock); if (!gc_heap::seg_table->ensure_space_for_insert () #ifdef BACKGROUND_GC || (is_bgc_in_progress() && !commit_mark_array_new_seg(__this, seg)) #endif //BACKGROUND_GC ) { leave_spin_lock(&gc_heap::gc_lock); return FALSE; } //insert at the head of the segment list generation* gen2 = generation_of (max_generation); heap_segment* oldhead = generation_start_segment (gen2); heap_segment_next (seg) = oldhead; generation_start_segment (gen2) = seg; #ifdef USE_REGIONS dprintf (REGIONS_LOG, ("setting gen2 start seg to %Ix(%Ix)->%Ix", (size_t)seg, heap_segment_mem (seg), heap_segment_mem (oldhead))); if (generation_tail_ro_region (gen2) == 0) { dprintf (REGIONS_LOG, ("setting gen2 tail ro -> %Ix", heap_segment_mem (seg))); generation_tail_ro_region (gen2) = seg; } #endif //USE_REGIONS seg_table->insert (heap_segment_mem(seg), (size_t)seg); seg_mapping_table_add_ro_segment (seg); if ((heap_segment_reserved (seg) > lowest_address) && (heap_segment_mem (seg) < highest_address)) { set_ro_segment_in_range (seg); } FIRE_EVENT(GCCreateSegment_V1, heap_segment_mem(seg), (size_t)(heap_segment_reserved (seg) - heap_segment_mem(seg)), gc_etw_segment_read_only_heap); leave_spin_lock (&gc_heap::gc_lock); return TRUE; } // No one is calling this function right now. If this is getting called we need // to take care of decommitting the mark array for it - we will need to remember // which portion of the mark array was committed and only decommit that. void gc_heap::remove_ro_segment (heap_segment* seg) { //clear the mark bits so a new segment allocated in its place will have a clear mark bits #ifdef BACKGROUND_GC if (gc_can_use_concurrent) { clear_mark_array (align_lower_mark_word (max (heap_segment_mem (seg), lowest_address)), align_on_card_word (min (heap_segment_allocated (seg), highest_address)), false); // read_only segments need the mark clear } #endif //BACKGROUND_GC enter_spin_lock (&gc_heap::gc_lock); seg_table->remove (heap_segment_mem (seg)); seg_mapping_table_remove_ro_segment (seg); // Locate segment (and previous segment) in the list. 
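    // (Standard singly linked list removal: walk from the generation's start segment
    // while remembering the previous node; if prev_seg stays NULL the removed segment
    // was the list head, so the generation's start segment is patched instead.)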
generation* gen2 = generation_of (max_generation); #ifdef USE_REGIONS if (generation_tail_ro_region (gen2) == seg) { generation_tail_ro_region (gen2) = 0; } #endif //USE_REGIONS heap_segment* curr_seg = generation_start_segment (gen2); heap_segment* prev_seg = NULL; while (curr_seg && curr_seg != seg) { prev_seg = curr_seg; curr_seg = heap_segment_next (curr_seg); } assert (curr_seg == seg); // Patch previous segment (or list head if there is none) to skip the removed segment. if (prev_seg) heap_segment_next (prev_seg) = heap_segment_next (curr_seg); else generation_start_segment (gen2) = heap_segment_next (curr_seg); leave_spin_lock (&gc_heap::gc_lock); } #endif //FEATURE_BASICFREEZE BOOL gc_heap::set_ro_segment_in_range (heap_segment* seg) { seg->flags |= heap_segment_flags_inrange; ro_segments_in_range = TRUE; return TRUE; } uint8_t** make_mark_list (size_t size) { uint8_t** mark_list = new (nothrow) uint8_t* [size]; return mark_list; } #define swap(a,b){uint8_t* t; t = a; a = b; b = t;} void verify_qsort_array (uint8_t* *low, uint8_t* *high) { uint8_t **i = 0; for (i = low+1; i <= high; i++) { if (*i < *(i-1)) { FATAL_GC_ERROR(); } } } #ifndef USE_INTROSORT void qsort1( uint8_t* *low, uint8_t* *high, unsigned int depth) { if (((low + 16) >= high) || (depth > 100)) { //insertion sort uint8_t **i, **j; for (i = low+1; i <= high; i++) { uint8_t* val = *i; for (j=i;j >low && val<*(j-1);j--) { *j=*(j-1); } *j=val; } } else { uint8_t *pivot, **left, **right; //sort low middle and high if (*(low+((high-low)/2)) < *low) swap (*(low+((high-low)/2)), *low); if (*high < *low) swap (*low, *high); if (*high < *(low+((high-low)/2))) swap (*(low+((high-low)/2)), *high); swap (*(low+((high-low)/2)), *(high-1)); pivot = *(high-1); left = low; right = high-1; while (1) { while (*(--right) > pivot); while (*(++left) < pivot); if (left < right) { swap(*left, *right); } else break; } swap (*left, *(high-1)); qsort1(low, left-1, depth+1); qsort1(left+1, high, depth+1); } } #endif //USE_INTROSORT void rqsort1( uint8_t* *low, uint8_t* *high) { if ((low + 16) >= high) { //insertion sort uint8_t **i, **j; for (i = low+1; i <= high; i++) { uint8_t* val = *i; for (j=i;j >low && val>*(j-1);j--) { *j=*(j-1); } *j=val; } } else { uint8_t *pivot, **left, **right; //sort low middle and high if (*(low+((high-low)/2)) > *low) swap (*(low+((high-low)/2)), *low); if (*high > *low) swap (*low, *high); if (*high > *(low+((high-low)/2))) swap (*(low+((high-low)/2)), *high); swap (*(low+((high-low)/2)), *(high-1)); pivot = *(high-1); left = low; right = high-1; while (1) { while (*(--right) < pivot); while (*(++left) > pivot); if (left < right) { swap(*left, *right); } else break; } swap (*left, *(high-1)); rqsort1(low, left-1); rqsort1(left+1, high); } } // vxsort uses introsort as a fallback if the AVX2 instruction set is not supported #if defined(USE_INTROSORT) || defined(USE_VXSORT) class introsort { private: static const int size_threshold = 64; static const int max_depth = 100; inline static void swap_elements(uint8_t** i,uint8_t** j) { uint8_t* t=*i; *i=*j; *j=t; } public: static void sort (uint8_t** begin, uint8_t** end, int ignored) { ignored = 0; introsort_loop (begin, end, max_depth); insertionsort (begin, end); } private: static void introsort_loop (uint8_t** lo, uint8_t** hi, int depth_limit) { while (hi-lo >= size_threshold) { if (depth_limit == 0) { heapsort (lo, hi); return; } uint8_t** p=median_partition (lo, hi); depth_limit=depth_limit-1; introsort_loop (p, hi, depth_limit); hi=p-1; } } static uint8_t** 
median_partition (uint8_t** low, uint8_t** high) { uint8_t *pivot, **left, **right; //sort low middle and high if (*(low+((high-low)/2)) < *low) swap_elements ((low+((high-low)/2)), low); if (*high < *low) swap_elements (low, high); if (*high < *(low+((high-low)/2))) swap_elements ((low+((high-low)/2)), high); swap_elements ((low+((high-low)/2)), (high-1)); pivot = *(high-1); left = low; right = high-1; while (1) { while (*(--right) > pivot); while (*(++left) < pivot); if (left < right) { swap_elements(left, right); } else break; } swap_elements (left, (high-1)); return left; } static void insertionsort (uint8_t** lo, uint8_t** hi) { for (uint8_t** i=lo+1; i <= hi; i++) { uint8_t** j = i; uint8_t* t = *i; while((j > lo) && (t <*(j-1))) { *j = *(j-1); j--; } *j = t; } } static void heapsort (uint8_t** lo, uint8_t** hi) { size_t n = hi - lo + 1; for (size_t i=n / 2; i >= 1; i--) { downheap (i,n,lo); } for (size_t i = n; i > 1; i--) { swap_elements (lo, lo + i - 1); downheap(1, i - 1, lo); } } static void downheap (size_t i, size_t n, uint8_t** lo) { uint8_t* d = *(lo + i - 1); size_t child; while (i <= n / 2) { child = 2*i; if (child < n && *(lo + child - 1)<(*(lo + child))) { child++; } if (!(d<*(lo + child - 1))) { break; } *(lo + i - 1) = *(lo + child - 1); i = child; } *(lo + i - 1) = d; } }; #endif //defined(USE_INTROSORT) || defined(USE_VXSORT) #ifdef USE_VXSORT static void do_vxsort (uint8_t** item_array, ptrdiff_t item_count, uint8_t* range_low, uint8_t* range_high) { // above this threshold, using AVX2 for sorting will likely pay off // despite possible downclocking on some devices const size_t AVX2_THRESHOLD_SIZE = 8 * 1024; // above this threshold, using AVX51F for sorting will likely pay off // despite possible downclocking on current devices const size_t AVX512F_THRESHOLD_SIZE = 128 * 1024; if (item_count <= 1) return; if (IsSupportedInstructionSet (InstructionSet::AVX2) && (item_count > AVX2_THRESHOLD_SIZE)) { dprintf(3, ("Sorting mark lists")); // use AVX512F only if the list is large enough to pay for downclocking impact if (IsSupportedInstructionSet (InstructionSet::AVX512F) && (item_count > AVX512F_THRESHOLD_SIZE)) { do_vxsort_avx512 (item_array, &item_array[item_count - 1], range_low, range_high); } else { do_vxsort_avx2 (item_array, &item_array[item_count - 1], range_low, range_high); } } else { dprintf (3, ("Sorting mark lists")); introsort::sort (item_array, &item_array[item_count - 1], 0); } #ifdef _DEBUG // check the array is sorted for (ptrdiff_t i = 0; i < item_count - 1; i++) { assert (item_array[i] <= item_array[i + 1]); } // check that the ends of the array are indeed in range // together with the above this implies all elements are in range assert ((range_low <= item_array[0]) && (item_array[item_count - 1] <= range_high)); #endif } #endif //USE_VXSORT #ifdef MULTIPLE_HEAPS static size_t target_mark_count_for_heap (size_t total_mark_count, int heap_count, int heap_number) { // compute the average (rounded down) size_t average_mark_count = total_mark_count / heap_count; // compute the remainder size_t remaining_mark_count = total_mark_count - (average_mark_count * heap_count); // compute the target count for this heap - last heap has the remainder if (heap_number == (heap_count - 1)) return (average_mark_count + remaining_mark_count); else return average_mark_count; } NOINLINE uint8_t** gc_heap::equalize_mark_lists (size_t total_mark_list_size) { size_t local_mark_count[MAX_SUPPORTED_CPUS]; size_t total_mark_count = 0; // compute mark count per heap into a local 
array // compute the total for (int i = 0; i < n_heaps; i++) { gc_heap* hp = g_heaps[i]; size_t mark_count = hp->mark_list_index - hp->mark_list; local_mark_count[i] = mark_count; total_mark_count += mark_count; } // this should agree with our input parameter assert(total_mark_count == total_mark_list_size); // compute the target count for this heap size_t this_target_mark_count = target_mark_count_for_heap (total_mark_count, n_heaps, heap_number); // if our heap has sufficient entries, we can exit early if (local_mark_count[heap_number] >= this_target_mark_count) return (mark_list + this_target_mark_count); // In the following, we try to fill the deficit in heap "deficit_heap_index" with // surplus from "surplus_heap_index". // If there is no deficit or surplus (anymore), the indices are advanced. int surplus_heap_index = 0; for (int deficit_heap_index = 0; deficit_heap_index <= heap_number; deficit_heap_index++) { // compute the target count for this heap - last heap has the remainder size_t deficit_target_mark_count = target_mark_count_for_heap (total_mark_count, n_heaps, deficit_heap_index); // if this heap has the target or larger count, skip it if (local_mark_count[deficit_heap_index] >= deficit_target_mark_count) continue; // while this heap is lower than average, fill it up while ((surplus_heap_index < n_heaps) && (local_mark_count[deficit_heap_index] < deficit_target_mark_count)) { size_t deficit = deficit_target_mark_count - local_mark_count[deficit_heap_index]; size_t surplus_target_mark_count = target_mark_count_for_heap(total_mark_count, n_heaps, surplus_heap_index); if (local_mark_count[surplus_heap_index] > surplus_target_mark_count) { size_t surplus = local_mark_count[surplus_heap_index] - surplus_target_mark_count; size_t amount_to_transfer = min(deficit, surplus); local_mark_count[surplus_heap_index] -= amount_to_transfer; if (deficit_heap_index == heap_number) { // copy amount_to_transfer mark list items memcpy(&g_heaps[deficit_heap_index]->mark_list[local_mark_count[deficit_heap_index]], &g_heaps[surplus_heap_index]->mark_list[local_mark_count[surplus_heap_index]], (amount_to_transfer*sizeof(mark_list[0]))); } local_mark_count[deficit_heap_index] += amount_to_transfer; } else { surplus_heap_index++; } } } return (mark_list + local_mark_count[heap_number]); } NOINLINE size_t gc_heap::sort_mark_list() { if ((settings.condemned_generation >= max_generation) #ifdef USE_REGIONS || (g_mark_list_piece == nullptr) #endif //USE_REGIONS ) { // fake a mark list overflow so merge_mark_lists knows to quit early mark_list_index = mark_list_end + 1; return 0; } // if this heap had a mark list overflow, we don't do anything if (mark_list_index > mark_list_end) { dprintf (2, ("h%d sort_mark_list overflow", heap_number)); mark_list_overflow = true; return 0; } // if any other heap had a mark list overflow, we fake one too, // so we don't use an incomplete mark list by mistake for (int i = 0; i < n_heaps; i++) { if (g_heaps[i]->mark_list_index > g_heaps[i]->mark_list_end) { mark_list_index = mark_list_end + 1; dprintf (2, ("h%d sort_mark_list: detected overflow on heap %d", heap_number, i)); return 0; } } // compute total mark list size and total ephemeral size size_t total_mark_list_size = 0; size_t total_ephemeral_size = 0; uint8_t* low = (uint8_t*)~0; uint8_t* high = 0; for (int i = 0; i < n_heaps; i++) { gc_heap* hp = g_heaps[i]; total_mark_list_size += (hp->mark_list_index - hp->mark_list); #ifdef USE_REGIONS // iterate through the ephemeral regions to get a tighter bound for (int 
gen_num = settings.condemned_generation; gen_num >= 0; gen_num--) { generation* gen = hp->generation_of (gen_num); for (heap_segment* seg = generation_start_segment (gen); seg != nullptr; seg = heap_segment_next (seg)) { size_t ephemeral_size = heap_segment_allocated (seg) - heap_segment_mem (seg); total_ephemeral_size += ephemeral_size; low = min (low, heap_segment_mem (seg)); high = max (high, heap_segment_allocated (seg)); } } #else //USE_REGIONS size_t ephemeral_size = heap_segment_allocated (hp->ephemeral_heap_segment) - hp->gc_low; total_ephemeral_size += ephemeral_size; low = min (low, hp->gc_low); high = max (high, heap_segment_allocated (hp->ephemeral_heap_segment)); #endif //USE_REGIONS } // give up if the mark list size is unreasonably large if (total_mark_list_size > (total_ephemeral_size / 256)) { mark_list_index = mark_list_end + 1; // let's not count this as a mark list overflow dprintf (2, ("h%d total mark list %Id is too large > (%Id / 256), don't use", heap_number, total_mark_list_size, total_ephemeral_size)); mark_list_overflow = false; return 0; } uint8_t **local_mark_list_index = equalize_mark_lists (total_mark_list_size); #ifdef USE_VXSORT ptrdiff_t item_count = local_mark_list_index - mark_list; //#define WRITE_SORT_DATA #if defined(_DEBUG) || defined(WRITE_SORT_DATA) // in debug, make a copy of the mark list // for checking and debugging purposes uint8_t** mark_list_copy = &g_mark_list_copy[heap_number * mark_list_size]; uint8_t** mark_list_copy_index = &mark_list_copy[item_count]; for (ptrdiff_t i = 0; i < item_count; i++) { uint8_t* item = mark_list[i]; assert ((low <= item) && (item < high)); mark_list_copy[i] = item; } #endif // _DEBUG || WRITE_SORT_DATA do_vxsort (mark_list, item_count, low, high); #ifdef WRITE_SORT_DATA char file_name[256]; sprintf_s (file_name, ARRAY_SIZE(file_name), "sort_data_gc%d_heap%d", settings.gc_index, heap_number); FILE* f; errno_t err = fopen_s (&f, file_name, "wb"); if (err == 0) { size_t magic = 'SDAT'; if (fwrite (&magic, sizeof(magic), 1, f) != 1) dprintf (3, ("fwrite failed\n")); if (fwrite (&elapsed_cycles, sizeof(elapsed_cycles), 1, f) != 1) dprintf (3, ("fwrite failed\n")); if (fwrite (&low, sizeof(low), 1, f) != 1) dprintf (3, ("fwrite failed\n")); if (fwrite (&item_count, sizeof(item_count), 1, f) != 1) dprintf (3, ("fwrite failed\n")); if (fwrite (mark_list_copy, sizeof(mark_list_copy[0]), item_count, f) != item_count) dprintf (3, ("fwrite failed\n")); if (fwrite (&magic, sizeof(magic), 1, f) != 1) dprintf (3, ("fwrite failed\n")); if (fclose (f) != 0) dprintf (3, ("fclose failed\n")); } #endif #ifdef _DEBUG // in debug, sort the copy as well using the proven sort, so we can check we got the right result if (mark_list_copy_index > mark_list_copy) { introsort::sort (mark_list_copy, mark_list_copy_index - 1, 0); } for (ptrdiff_t i = 0; i < item_count; i++) { uint8_t* item = mark_list[i]; assert (mark_list_copy[i] == item); } #endif //_DEBUG #else //USE_VXSORT dprintf (3, ("Sorting mark lists")); if (local_mark_list_index > mark_list) { introsort::sort (mark_list, local_mark_list_index - 1, 0); } #endif //USE_VXSORT uint8_t** x = mark_list; #ifdef USE_REGIONS // first set the pieces for all regions to empty assert (g_mark_list_piece_size >= region_count); for (size_t region_index = 0; region_index < region_count; region_index++) { mark_list_piece_start[region_index] = NULL; mark_list_piece_end[region_index] = NULL; } // predicate means: x is still within the mark list, and within the bounds of this region #define 
predicate(x) (((x) < local_mark_list_index) && (*(x) < region_limit)) while (x < local_mark_list_index) { heap_segment* region = get_region_info_for_address (*x); // sanity check - the object on the mark list should be within the region assert ((heap_segment_mem (region) <= *x) && (*x < heap_segment_allocated (region))); size_t region_index = get_basic_region_index_for_address (heap_segment_mem (region)); uint8_t* region_limit = heap_segment_allocated (region); uint8_t*** mark_list_piece_start_ptr = &mark_list_piece_start[region_index]; uint8_t*** mark_list_piece_end_ptr = &mark_list_piece_end[region_index]; #else // USE_REGIONS // predicate means: x is still within the mark list, and within the bounds of this heap #define predicate(x) (((x) < local_mark_list_index) && (*(x) < heap->ephemeral_high)) // first set the pieces for all heaps to empty int heap_num; for (heap_num = 0; heap_num < n_heaps; heap_num++) { mark_list_piece_start[heap_num] = NULL; mark_list_piece_end[heap_num] = NULL; } heap_num = -1; while (x < local_mark_list_index) { gc_heap* heap; // find the heap x points into - searching cyclically from the last heap, // because in many cases the right heap is the next one or comes soon after #ifdef _DEBUG int last_heap_num = heap_num; #endif //_DEBUG do { heap_num++; if (heap_num >= n_heaps) heap_num = 0; assert(heap_num != last_heap_num); // we should always find the heap - infinite loop if not! heap = g_heaps[heap_num]; } while (!(*x >= heap->ephemeral_low && *x < heap->ephemeral_high)); uint8_t*** mark_list_piece_start_ptr = &mark_list_piece_start[heap_num]; uint8_t*** mark_list_piece_end_ptr = &mark_list_piece_end[heap_num]; #endif // USE_REGIONS // x is the start of the mark list piece for this heap/region *mark_list_piece_start_ptr = x; // to find the end of the mark list piece for this heap/region, find the first x // that has !predicate(x), i.e. that is either not in this heap, or beyond the end of the list if (predicate(x)) { // let's see if we get lucky and the whole rest belongs to this piece if (predicate(local_mark_list_index -1)) { x = local_mark_list_index; *mark_list_piece_end_ptr = x; break; } // we play a variant of binary search to find the point sooner. // the first loop advances by increasing steps until the predicate turns false. // then we retreat the last step, and the second loop advances by decreasing steps, keeping the predicate true. 
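// illustrative walk-through (hypothetical offsets, not from a real trace):
// suppose entries at offsets 0..9 from x still satisfy the predicate and offset 10 is
// the first one that does not.
//   growing phase:   inc = 2 -> offset 2 (ok), inc = 4 -> offset 6 (ok), inc = 8 -> offset 14 (fails)
//   undo last step:  x is back at offset 6, where the predicate still holds
//   shrinking phase: inc = 4 -> offset 10 fails (stay), inc = 2 -> offset 8 ok (move),
//                    inc = 1 -> offset 9 ok (move)
// the loops end with the predicate true at offset 9 and false at offset 10, so x + 1
// below is the exclusive end of this heap's/region's piece.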
unsigned inc = 1; do { inc *= 2; uint8_t** temp_x = x; x += inc; if (temp_x > x) { break; } } while (predicate(x)); // we know that only the last step was wrong, so we undo it x -= inc; do { // loop invariant - predicate holds at x, but not x + inc assert (predicate(x) && !(((x + inc) > x) && predicate(x + inc))); inc /= 2; if (((x + inc) > x) && predicate(x + inc)) { x += inc; } } while (inc > 1); // the termination condition and the loop invariant together imply this: assert(predicate(x) && !predicate(x + inc) && (inc == 1)); // so the spot we're looking for is one further x += 1; } *mark_list_piece_end_ptr = x; } #undef predicate return total_mark_list_size; } void gc_heap::append_to_mark_list (uint8_t **start, uint8_t **end) { size_t slots_needed = end - start; size_t slots_available = mark_list_end + 1 - mark_list_index; size_t slots_to_copy = min(slots_needed, slots_available); memcpy(mark_list_index, start, slots_to_copy*sizeof(*start)); mark_list_index += slots_to_copy; dprintf (3, ("h%d: appended %Id slots to mark_list\n", heap_number, slots_to_copy)); } #ifdef _DEBUG #if !defined(_MSC_VER) #if !defined(__cdecl) #if defined(__i386__) #define __cdecl __attribute__((cdecl)) #else #define __cdecl #endif #endif #endif static int __cdecl cmp_mark_list_item (const void* vkey, const void* vdatum) { uint8_t** key = (uint8_t**)vkey; uint8_t** datum = (uint8_t**)vdatum; if (*key < *datum) return -1; else if (*key > *datum) return 1; else return 0; } #endif // _DEBUG #ifdef USE_REGIONS uint8_t** gc_heap::get_region_mark_list (uint8_t* start, uint8_t* end, uint8_t*** mark_list_end_ptr) { size_t region_number = get_basic_region_index_for_address (start); size_t source_number = region_number; #else //USE_REGIONS void gc_heap::merge_mark_lists (size_t total_mark_list_size) { // in case of mark list overflow, don't bother if (total_mark_list_size == 0) { return; } #ifdef _DEBUG // if we had more than the average number of mark list items, // make sure these got copied to another heap, i.e. 
didn't get lost size_t this_mark_list_size = target_mark_count_for_heap (total_mark_list_size, n_heaps, heap_number); for (uint8_t** p = mark_list + this_mark_list_size; p < mark_list_index; p++) { uint8_t* item = *p; uint8_t** found_slot = nullptr; for (int i = 0; i < n_heaps; i++) { uint8_t** heap_mark_list = &g_mark_list[i * mark_list_size]; size_t heap_mark_list_size = target_mark_count_for_heap (total_mark_list_size, n_heaps, i); found_slot = (uint8_t**)bsearch (&item, heap_mark_list, heap_mark_list_size, sizeof(item), cmp_mark_list_item); if (found_slot != nullptr) break; } assert ((found_slot != nullptr) && (*found_slot == item)); } #endif dprintf(3, ("merge_mark_lists: heap_number = %d starts out with %Id entries", heap_number, (mark_list_index - mark_list))); int source_number = heap_number; #endif //USE_REGIONS uint8_t** source[MAX_SUPPORTED_CPUS]; uint8_t** source_end[MAX_SUPPORTED_CPUS]; int source_heap[MAX_SUPPORTED_CPUS]; int source_count = 0; for (int i = 0; i < n_heaps; i++) { gc_heap* heap = g_heaps[i]; if (heap->mark_list_piece_start[source_number] < heap->mark_list_piece_end[source_number]) { source[source_count] = heap->mark_list_piece_start[source_number]; source_end[source_count] = heap->mark_list_piece_end[source_number]; source_heap[source_count] = i; if (source_count < MAX_SUPPORTED_CPUS) source_count++; } } dprintf(3, ("source_number = %d has %d sources\n", source_number, source_count)); #if defined(_DEBUG) || defined(TRACE_GC) for (int j = 0; j < source_count; j++) { dprintf(3, ("source_number = %d ", source_number)); dprintf(3, (" source from heap %d = %Ix .. %Ix (%Id entries)", (size_t)(source_heap[j]), (size_t)(source[j][0]), (size_t)(source_end[j][-1]), (size_t)(source_end[j] - source[j]))); // the sources should all be sorted for (uint8_t **x = source[j]; x < source_end[j] - 1; x++) { if (x[0] > x[1]) { dprintf(3, ("oops, mark_list from source %d for heap %d isn't sorted\n", j, source_number)); assert (0); } } } #endif //_DEBUG || TRACE_GC mark_list = &g_mark_list_copy [heap_number*mark_list_size]; mark_list_index = mark_list; mark_list_end = &mark_list [mark_list_size-1]; int piece_count = 0; if (source_count == 0) { ; // nothing to do } else if (source_count == 1) { mark_list = source[0]; mark_list_index = source_end[0]; mark_list_end = mark_list_index; piece_count++; } else { while (source_count > 1) { // find the lowest and second lowest value in the sources we're merging from int lowest_source = 0; uint8_t *lowest = *source[0]; uint8_t *second_lowest = *source[1]; for (int i = 1; i < source_count; i++) { if (lowest > *source[i]) { second_lowest = lowest; lowest = *source[i]; lowest_source = i; } else if (second_lowest > *source[i]) { second_lowest = *source[i]; } } // find the point in the lowest source where it either runs out or is not <= second_lowest anymore // let's first try to get lucky and see if the whole source is <= second_lowest -- this is actually quite common uint8_t **x; if (source_end[lowest_source][-1] <= second_lowest) x = source_end[lowest_source]; else { // use linear search to find the end -- could also use binary search as in sort_mark_list, // but saw no improvement doing that for (x = source[lowest_source]; x < source_end[lowest_source] && *x <= second_lowest; x++) ; } // blast this piece to the mark list append_to_mark_list(source[lowest_source], x); piece_count++; source[lowest_source] = x; // check whether this source is now exhausted if (x >= source_end[lowest_source]) { // if it's not the source with the highest index, copy 
the source with the highest index // over it so the non-empty sources are always at the beginning if (lowest_source < source_count-1) { source[lowest_source] = source[source_count-1]; source_end[lowest_source] = source_end[source_count-1]; } source_count--; } } // we're left with just one source that we copy append_to_mark_list(source[0], source_end[0]); piece_count++; } #if defined(_DEBUG) || defined(TRACE_GC) // the final mark list must be sorted for (uint8_t **x = mark_list; x < mark_list_index - 1; x++) { if (x[0] > x[1]) { dprintf(3, ("oops, mark_list for heap %d isn't sorted at the end of merge_mark_lists", heap_number)); assert (0); } } #endif //_DEBUG || TRACE_GC #ifdef USE_REGIONS *mark_list_end_ptr = mark_list_index; return mark_list; #endif // USE_REGIONS } #else #ifdef USE_REGIONS // a variant of binary search that doesn't look for an exact match, // but finds the first element >= e static uint8_t** binary_search (uint8_t** left, uint8_t** right, uint8_t* e) { if (left == right) return left; assert (left < right); uint8_t** a = left; size_t l = 0; size_t r = (size_t)(right - left); while ((r - l) >= 2) { size_t m = l + (r - l) / 2; // loop condition says that r - l is at least 2 // so l, m, r are all different assert ((l < m) && (m < r)); if (a[m] < e) { l = m; } else { r = m; } } if (a[l] < e) return a + l + 1; else return a + l; } uint8_t** gc_heap::get_region_mark_list (uint8_t* start, uint8_t* end, uint8_t*** mark_list_end_ptr) { // do a binary search over the sorted marked list to find start and end of the // mark list for this region *mark_list_end_ptr = binary_search (mark_list, mark_list_index, end); return binary_search (mark_list, *mark_list_end_ptr, start); } #endif //USE_REGIONS #endif //MULTIPLE_HEAPS void gc_heap::grow_mark_list () { // with vectorized sorting, we can use bigger mark lists #ifdef USE_VXSORT #ifdef MULTIPLE_HEAPS const size_t MAX_MARK_LIST_SIZE = IsSupportedInstructionSet (InstructionSet::AVX2) ? (1000 * 1024) : (200 * 1024); #else //MULTIPLE_HEAPS const size_t MAX_MARK_LIST_SIZE = IsSupportedInstructionSet (InstructionSet::AVX2) ? (32 * 1024) : (16 * 1024); #endif //MULTIPLE_HEAPS #else //USE_VXSORT #ifdef MULTIPLE_HEAPS const size_t MAX_MARK_LIST_SIZE = 200 * 1024; #else //MULTIPLE_HEAPS const size_t MAX_MARK_LIST_SIZE = 16 * 1024; #endif //MULTIPLE_HEAPS #endif //USE_VXSORT size_t new_mark_list_size = min (mark_list_size * 2, MAX_MARK_LIST_SIZE); if (new_mark_list_size == mark_list_size) return; #ifdef MULTIPLE_HEAPS uint8_t** new_mark_list = make_mark_list (new_mark_list_size * n_heaps); uint8_t** new_mark_list_copy = make_mark_list (new_mark_list_size * n_heaps); if ((new_mark_list != nullptr) && (new_mark_list_copy != nullptr)) { delete[] g_mark_list; g_mark_list = new_mark_list; delete[] g_mark_list_copy; g_mark_list_copy = new_mark_list_copy; mark_list_size = new_mark_list_size; } else { delete[] new_mark_list; delete[] new_mark_list_copy; } #else //MULTIPLE_HEAPS uint8_t** new_mark_list = make_mark_list (new_mark_list_size); if (new_mark_list != nullptr) { delete[] mark_list; g_mark_list = new_mark_list; mark_list_size = new_mark_list_size; } #endif //MULTIPLE_HEAPS } class seg_free_spaces { struct seg_free_space { BOOL is_plug; void* start; }; struct free_space_bucket { seg_free_space* free_space; ptrdiff_t count_add; // Assigned when we first construct the array. ptrdiff_t count_fit; // How many items left when we are fitting plugs. 
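        // Note: the free_space pointers of consecutive buckets all point into one shared
        // seg_free_space array (set up in add_buckets below), so bucket i's entries end
        // exactly where bucket i+1's begin; dump() and move_bucket rely on that layout.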
}; void move_bucket (int old_power2, int new_power2) { // PREFAST warning 22015: old_power2 could be negative assert (old_power2 >= 0); assert (old_power2 >= new_power2); if (old_power2 == new_power2) { return; } seg_free_space* src_index = free_space_buckets[old_power2].free_space; for (int i = old_power2; i > new_power2; i--) { seg_free_space** dest = &(free_space_buckets[i].free_space); (*dest)++; seg_free_space* dest_index = free_space_buckets[i - 1].free_space; if (i > (new_power2 + 1)) { seg_free_space temp = *src_index; *src_index = *dest_index; *dest_index = temp; } src_index = dest_index; } free_space_buckets[old_power2].count_fit--; free_space_buckets[new_power2].count_fit++; } #ifdef _DEBUG void dump_free_space (seg_free_space* item) { uint8_t* addr = 0; size_t len = 0; if (item->is_plug) { mark* m = (mark*)(item->start); len = pinned_len (m); addr = pinned_plug (m) - len; } else { heap_segment* seg = (heap_segment*)(item->start); addr = heap_segment_plan_allocated (seg); len = heap_segment_committed (seg) - addr; } dprintf (SEG_REUSE_LOG_1, ("[%d]0x%Ix %Id", heap_num, addr, len)); } void dump() { seg_free_space* item = NULL; int i = 0; dprintf (SEG_REUSE_LOG_1, ("[%d]----------------------------------\nnow the free spaces look like:", heap_num)); for (i = 0; i < (free_space_bucket_count - 1); i++) { dprintf (SEG_REUSE_LOG_1, ("[%d]Free spaces for 2^%d bucket:", heap_num, (base_power2 + i))); dprintf (SEG_REUSE_LOG_1, ("[%d]%s %s", heap_num, "start", "len")); item = free_space_buckets[i].free_space; while (item < free_space_buckets[i + 1].free_space) { dump_free_space (item); item++; } dprintf (SEG_REUSE_LOG_1, ("[%d]----------------------------------", heap_num)); } dprintf (SEG_REUSE_LOG_1, ("[%d]Free spaces for 2^%d bucket:", heap_num, (base_power2 + i))); dprintf (SEG_REUSE_LOG_1, ("[%d]%s %s", heap_num, "start", "len")); item = free_space_buckets[i].free_space; while (item <= &seg_free_space_array[free_space_item_count - 1]) { dump_free_space (item); item++; } dprintf (SEG_REUSE_LOG_1, ("[%d]----------------------------------", heap_num)); } #endif //_DEBUG free_space_bucket* free_space_buckets; seg_free_space* seg_free_space_array; ptrdiff_t free_space_bucket_count; ptrdiff_t free_space_item_count; int base_power2; int heap_num; #ifdef _DEBUG BOOL has_end_of_seg; #endif //_DEBUG public: seg_free_spaces (int h_number) { heap_num = h_number; } BOOL alloc () { size_t total_prealloc_size = MAX_NUM_BUCKETS * sizeof (free_space_bucket) + MAX_NUM_FREE_SPACES * sizeof (seg_free_space); free_space_buckets = (free_space_bucket*) new (nothrow) uint8_t[total_prealloc_size]; return (!!free_space_buckets); } // We take the ordered free space array we got from the 1st pass, // and feed the portion that we decided to use to this method, ie, // the largest item_count free spaces. 
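    // Illustrative example (hypothetical numbers): with base = 10 and
    // ordered_free_spaces = { 3, 1, 2 } for the 2^10, 2^11 and 2^12 buckets,
    // item_count is 6 and the buckets carve up seg_free_space_array as
    //   2^10 -> slots [0..2], 2^11 -> slot [3], 2^12 -> slots [4..5],
    // with count_add/count_fit of each bucket starting at 3, 1 and 2 respectively.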
void add_buckets (int base, size_t* ordered_free_spaces, int bucket_count, size_t item_count) { assert (free_space_buckets); assert (item_count <= (size_t)MAX_PTR); free_space_bucket_count = bucket_count; free_space_item_count = item_count; base_power2 = base; #ifdef _DEBUG has_end_of_seg = FALSE; #endif //_DEBUG ptrdiff_t total_item_count = 0; ptrdiff_t i = 0; seg_free_space_array = (seg_free_space*)(free_space_buckets + free_space_bucket_count); for (i = 0; i < (ptrdiff_t)item_count; i++) { seg_free_space_array[i].start = 0; seg_free_space_array[i].is_plug = FALSE; } for (i = 0; i < bucket_count; i++) { free_space_buckets[i].count_add = ordered_free_spaces[i]; free_space_buckets[i].count_fit = ordered_free_spaces[i]; free_space_buckets[i].free_space = &seg_free_space_array[total_item_count]; total_item_count += free_space_buckets[i].count_add; } assert (total_item_count == (ptrdiff_t)item_count); } // If we are adding a free space before a plug we pass the // mark stack position so we can update the length; we could // also be adding the free space after the last plug in which // case start is the segment which we'll need to update the // heap_segment_plan_allocated. void add (void* start, BOOL plug_p, BOOL first_p) { size_t size = (plug_p ? pinned_len ((mark*)start) : (heap_segment_committed ((heap_segment*)start) - heap_segment_plan_allocated ((heap_segment*)start))); if (plug_p) { dprintf (SEG_REUSE_LOG_1, ("[%d]Adding a free space before plug: %Id", heap_num, size)); } else { dprintf (SEG_REUSE_LOG_1, ("[%d]Adding a free space at end of seg: %Id", heap_num, size)); #ifdef _DEBUG has_end_of_seg = TRUE; #endif //_DEBUG } if (first_p) { size_t eph_gen_starts = gc_heap::eph_gen_starts_size; size -= eph_gen_starts; if (plug_p) { mark* m = (mark*)(start); pinned_len (m) -= eph_gen_starts; } else { heap_segment* seg = (heap_segment*)start; heap_segment_plan_allocated (seg) += eph_gen_starts; } } int bucket_power2 = index_of_highest_set_bit (size); if (bucket_power2 < base_power2) { return; } free_space_bucket* bucket = &free_space_buckets[bucket_power2 - base_power2]; seg_free_space* bucket_free_space = bucket->free_space; assert (plug_p || (!plug_p && bucket->count_add)); if (bucket->count_add == 0) { dprintf (SEG_REUSE_LOG_1, ("[%d]Already have enough of 2^%d", heap_num, bucket_power2)); return; } ptrdiff_t index = bucket->count_add - 1; dprintf (SEG_REUSE_LOG_1, ("[%d]Building free spaces: adding %Ix; len: %Id (2^%d)", heap_num, (plug_p ? (pinned_plug ((mark*)start) - pinned_len ((mark*)start)) : heap_segment_plan_allocated ((heap_segment*)start)), size, bucket_power2)); if (plug_p) { bucket_free_space[index].is_plug = TRUE; } bucket_free_space[index].start = start; bucket->count_add--; } #ifdef _DEBUG // Do a consistency check after all free spaces are added. 
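    // "Consistent" means: every slot of seg_free_space_array was filled in by add()
    // (start != 0), every bucket's count_add was driven down to zero, and the number
    // of end-of-segment entries matches whether one was recorded; this is meant to run
    // after all add() calls and before any fit().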
void check() { ptrdiff_t i = 0; int end_of_seg_count = 0; for (i = 0; i < free_space_item_count; i++) { assert (seg_free_space_array[i].start); if (!(seg_free_space_array[i].is_plug)) { end_of_seg_count++; } } if (has_end_of_seg) { assert (end_of_seg_count == 1); } else { assert (end_of_seg_count == 0); } for (i = 0; i < free_space_bucket_count; i++) { assert (free_space_buckets[i].count_add == 0); } } #endif //_DEBUG uint8_t* fit (uint8_t* old_loc, size_t plug_size REQD_ALIGN_AND_OFFSET_DCL) { if (old_loc) { #ifdef SHORT_PLUGS assert (!is_plug_padded (old_loc)); #endif //SHORT_PLUGS assert (!node_realigned (old_loc)); } size_t saved_plug_size = plug_size; #ifdef FEATURE_STRUCTALIGN // BARTOKTODO (4841): this code path is disabled (see can_fit_all_blocks_p) until we take alignment requirements into account _ASSERTE(requiredAlignment == DATA_ALIGNMENT && false); #endif // FEATURE_STRUCTALIGN size_t plug_size_to_fit = plug_size; // best fit is only done for gen1 to gen2 and we do not pad in gen2. // however we must account for requirements of large alignment. // which may result in realignment padding. #ifdef RESPECT_LARGE_ALIGNMENT plug_size_to_fit += switch_alignment_size(FALSE); #endif //RESPECT_LARGE_ALIGNMENT int plug_power2 = index_of_highest_set_bit (round_up_power2 (plug_size_to_fit + Align(min_obj_size))); ptrdiff_t i; uint8_t* new_address = 0; if (plug_power2 < base_power2) { plug_power2 = base_power2; } int chosen_power2 = plug_power2 - base_power2; retry: for (i = chosen_power2; i < free_space_bucket_count; i++) { if (free_space_buckets[i].count_fit != 0) { break; } chosen_power2++; } dprintf (SEG_REUSE_LOG_1, ("[%d]Fitting plug len %Id (2^%d) using 2^%d free space", heap_num, plug_size, plug_power2, (chosen_power2 + base_power2))); assert (i < free_space_bucket_count); seg_free_space* bucket_free_space = free_space_buckets[chosen_power2].free_space; ptrdiff_t free_space_count = free_space_buckets[chosen_power2].count_fit; size_t new_free_space_size = 0; BOOL can_fit = FALSE; size_t pad = 0; for (i = 0; i < free_space_count; i++) { size_t free_space_size = 0; pad = 0; if (bucket_free_space[i].is_plug) { mark* m = (mark*)(bucket_free_space[i].start); uint8_t* plug_free_space_start = pinned_plug (m) - pinned_len (m); if (!((old_loc == 0) || same_large_alignment_p (old_loc, plug_free_space_start))) { pad = switch_alignment_size (FALSE); } plug_size = saved_plug_size + pad; free_space_size = pinned_len (m); new_address = pinned_plug (m) - pinned_len (m); if (free_space_size >= (plug_size + Align (min_obj_size)) || free_space_size == plug_size) { new_free_space_size = free_space_size - plug_size; pinned_len (m) = new_free_space_size; #ifdef SIMPLE_DPRINTF dprintf (SEG_REUSE_LOG_0, ("[%d]FP: 0x%Ix->0x%Ix(%Ix)(%Ix), [0x%Ix (2^%d) -> [0x%Ix (2^%d)", heap_num, old_loc, new_address, (plug_size - pad), pad, pinned_plug (m), index_of_highest_set_bit (free_space_size), (pinned_plug (m) - pinned_len (m)), index_of_highest_set_bit (new_free_space_size))); #endif //SIMPLE_DPRINTF if (pad != 0) { set_node_realigned (old_loc); } can_fit = TRUE; } } else { heap_segment* seg = (heap_segment*)(bucket_free_space[i].start); free_space_size = heap_segment_committed (seg) - heap_segment_plan_allocated (seg); if (!((old_loc == 0) || same_large_alignment_p (old_loc, heap_segment_plan_allocated (seg)))) { pad = switch_alignment_size (FALSE); } plug_size = saved_plug_size + pad; if (free_space_size >= (plug_size + Align (min_obj_size)) || free_space_size == plug_size) { new_address = 
heap_segment_plan_allocated (seg); new_free_space_size = free_space_size - plug_size; heap_segment_plan_allocated (seg) = new_address + plug_size; #ifdef SIMPLE_DPRINTF dprintf (SEG_REUSE_LOG_0, ("[%d]FS: 0x%Ix-> 0x%Ix(%Ix) (2^%d) -> 0x%Ix (2^%d)", heap_num, old_loc, new_address, (plug_size - pad), index_of_highest_set_bit (free_space_size), heap_segment_plan_allocated (seg), index_of_highest_set_bit (new_free_space_size))); #endif //SIMPLE_DPRINTF if (pad != 0) set_node_realigned (old_loc); can_fit = TRUE; } } if (can_fit) { break; } } if (!can_fit) { assert (chosen_power2 == 0); chosen_power2 = 1; goto retry; } new_address += pad; assert ((chosen_power2 && (i == 0)) || ((!chosen_power2) && (i < free_space_count))); int new_bucket_power2 = index_of_highest_set_bit (new_free_space_size); if (new_bucket_power2 < base_power2) { new_bucket_power2 = base_power2; } move_bucket (chosen_power2, new_bucket_power2 - base_power2); //dump(); return new_address; } void cleanup () { if (free_space_buckets) { delete [] free_space_buckets; } if (seg_free_space_array) { delete [] seg_free_space_array; } } }; #define marked(i) header(i)->IsMarked() #define set_marked(i) header(i)->SetMarked() #define clear_marked(i) header(i)->ClearMarked() #define pinned(i) header(i)->IsPinned() #define set_pinned(i) header(i)->SetPinned() #define clear_pinned(i) header(i)->GetHeader()->ClrGCBit(); inline size_t my_get_size (Object* ob) { MethodTable* mT = header(ob)->GetMethodTable(); return (mT->GetBaseSize() + (mT->HasComponentSize() ? ((size_t)((CObjectHeader*)ob)->GetNumComponents() * mT->RawGetComponentSize()) : 0)); } //#define size(i) header(i)->GetSize() #define size(i) my_get_size (header(i)) #define contain_pointers(i) header(i)->ContainsPointers() #ifdef COLLECTIBLE_CLASS #define contain_pointers_or_collectible(i) header(i)->ContainsPointersOrCollectible() #define get_class_object(i) GCToEEInterface::GetLoaderAllocatorObjectForGC((Object *)i) #define is_collectible(i) method_table(i)->Collectible() #else //COLLECTIBLE_CLASS #define contain_pointers_or_collectible(i) header(i)->ContainsPointers() #endif //COLLECTIBLE_CLASS #ifdef BACKGROUND_GC inline void gc_heap::seg_clear_mark_array_bits_soh (heap_segment* seg) { uint8_t* range_beg = 0; uint8_t* range_end = 0; if (bgc_mark_array_range (seg, FALSE, &range_beg, &range_end)) { clear_mark_array (range_beg, align_on_mark_word (range_end), FALSE #ifdef FEATURE_BASICFREEZE , TRUE #endif // FEATURE_BASICFREEZE ); } } void gc_heap::bgc_clear_batch_mark_array_bits (uint8_t* start, uint8_t* end) { if ((start < background_saved_highest_address) && (end > background_saved_lowest_address)) { start = max (start, background_saved_lowest_address); end = min (end, background_saved_highest_address); size_t start_mark_bit = mark_bit_of (start); size_t end_mark_bit = mark_bit_of (end); unsigned int startbit = mark_bit_bit (start_mark_bit); unsigned int endbit = mark_bit_bit (end_mark_bit); size_t startwrd = mark_bit_word (start_mark_bit); size_t endwrd = mark_bit_word (end_mark_bit); dprintf (3, ("Clearing all mark array bits between [%Ix:%Ix-[%Ix:%Ix", (size_t)start, (size_t)start_mark_bit, (size_t)end, (size_t)end_mark_bit)); unsigned int firstwrd = lowbits (~0, startbit); unsigned int lastwrd = highbits (~0, endbit); if (startwrd == endwrd) { if (startbit != endbit) { unsigned int wrd = firstwrd | lastwrd; mark_array[startwrd] &= wrd; } else { assert (start == end); } return; } // clear the first mark word. 
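        // (for illustration, assuming lowbits/highbits produce masks of the bits below /
        // at-and-above the given bit index: with 32-bit mark words, startbit == 5 gives
        // firstwrd == 0x0000001f, so the AND below keeps bits 0..4 and clears bits 5..31
        // of the first word, while endbit == 20 gives lastwrd == 0xfff00000, so only
        // bits 20..31 survive in the last word.)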
if (startbit) { mark_array[startwrd] &= firstwrd; startwrd++; } for (size_t wrdtmp = startwrd; wrdtmp < endwrd; wrdtmp++) { mark_array[wrdtmp] = 0; } // clear the last mark word. if (endbit) { mark_array[endwrd] &= lastwrd; } } } #endif //BACKGROUND_GC inline BOOL gc_heap::is_mark_set (uint8_t* o) { return marked (o); } #if defined (_MSC_VER) && defined (TARGET_X86) #pragma optimize("y", on) // Small critical routines, don't put in EBP frame #endif //_MSC_VER && TARGET_X86 // return the generation number of an object. // It is assumed that the object is valid. // Note that this will return max_generation for UOH objects int gc_heap::object_gennum (uint8_t* o) { #ifdef USE_REGIONS return get_region_gen_num (o); #else if (in_range_for_segment (o, ephemeral_heap_segment) && (o >= generation_allocation_start (generation_of (max_generation - 1)))) { // in an ephemeral generation. for ( int i = 0; i < max_generation-1; i++) { if ((o >= generation_allocation_start (generation_of (i)))) return i; } return max_generation-1; } else { return max_generation; } #endif //USE_REGIONS } int gc_heap::object_gennum_plan (uint8_t* o) { #ifdef USE_REGIONS return get_region_plan_gen_num (o); #else if (in_range_for_segment (o, ephemeral_heap_segment)) { for (int i = 0; i < ephemeral_generation_count; i++) { uint8_t* plan_start = generation_plan_allocation_start (generation_of (i)); if (plan_start && (o >= plan_start)) { return i; } } } return max_generation; #endif //USE_REGIONS } #if defined(_MSC_VER) && defined(TARGET_X86) #pragma optimize("", on) // Go back to command line default optimizations #endif //_MSC_VER && TARGET_X86 #ifdef USE_REGIONS void get_initial_region(int gen, int hn, uint8_t** region_start, uint8_t** region_end) { *region_start = initial_regions[hn][gen][0]; *region_end = initial_regions[hn][gen][1]; } bool gc_heap::initial_make_soh_regions (gc_heap* hp) { uint8_t* region_start; uint8_t* region_end; uint32_t hn = 0; #ifdef MULTIPLE_HEAPS hn = hp->heap_number; #endif //MULTIPLE_HEAPS for (int i = max_generation; i >= 0; i--) { get_initial_region(i, hn, &region_start, &region_end); size_t region_size = region_end - region_start; heap_segment* current_region = make_heap_segment (region_start, region_size, hp, i); if (current_region == nullptr) { return false; } uint8_t* gen_start = heap_segment_mem (current_region); make_generation (i, current_region, gen_start); if (i == 0) { ephemeral_heap_segment = current_region; alloc_allocated = heap_segment_allocated (current_region); } } for (int i = max_generation; i >= 0; i--) { dprintf (REGIONS_LOG, ("h%d gen%d alloc seg is %Ix, start seg is %Ix (%Ix-%Ix)", heap_number, i, generation_allocation_segment (generation_of (i)), generation_start_segment (generation_of (i)), heap_segment_mem (generation_start_segment (generation_of (i))), heap_segment_allocated (generation_start_segment (generation_of (i))))); } return true; } bool gc_heap::initial_make_uoh_regions (int gen, gc_heap* hp) { uint8_t* region_start; uint8_t* region_end; uint32_t hn = 0; #ifdef MULTIPLE_HEAPS hn = hp->heap_number; #endif //MULTIPLE_HEAPS get_initial_region(gen, hn, &region_start, &region_end); size_t region_size = region_end - region_start; heap_segment* uoh_region = make_heap_segment (region_start, region_size, hp, gen); if (uoh_region == nullptr) { return false; } uoh_region->flags |= (gen == loh_generation) ? 
heap_segment_flags_loh : heap_segment_flags_poh; uint8_t* gen_start = heap_segment_mem (uoh_region); make_generation (gen, uoh_region, gen_start); return true; } void gc_heap::clear_region_info (heap_segment* region) { if (!heap_segment_uoh_p (region)) { //cleanup the brick table back to the empty value clear_brick_table (heap_segment_mem (region), heap_segment_reserved (region)); } // we should really clear cards as well!! #ifdef BACKGROUND_GC ::record_changed_seg ((uint8_t*)region, heap_segment_reserved (region), settings.gc_index, current_bgc_state, seg_deleted); if (dt_high_memory_load_p()) { decommit_mark_array_by_seg (region); } #endif //BACKGROUND_GC } // Note that returning a region to free does not decommit. // REGIONS PERF TODO: should decommit if needed. void gc_heap::return_free_region (heap_segment* region) { clear_region_info (region); region_free_list::add_region_descending (region, free_regions); uint8_t* region_start = get_region_start (region); uint8_t* region_end = heap_segment_reserved (region); int num_basic_regions = (int)((region_end - region_start) >> min_segment_size_shr); dprintf (REGIONS_LOG, ("RETURNING region %Ix (%d basic regions) to free", heap_segment_mem (region), num_basic_regions)); for (int i = 0; i < num_basic_regions; i++) { uint8_t* basic_region_start = region_start + ((size_t)i << min_segment_size_shr); heap_segment* basic_region = get_region_info (basic_region_start); heap_segment_allocated (basic_region) = 0; #ifdef MULTIPLE_HEAPS heap_segment_heap (basic_region) = 0; #endif //MULTIPLE_HEAPS // I'm intentionally not resetting gen_num/plan_gen_num which will show us // which gen/plan gen this region was and that's useful for debugging. } } // USE_REGIONS TODO: SOH should be able to get a large region and split it up into basic regions // if needed. // USE_REGIONS TODO: In Server GC we should allow to get a free region from another heap. heap_segment* gc_heap::get_free_region (int gen_number, size_t size) { heap_segment* region = 0; if (gen_number <= max_generation) { assert (size == 0); region = free_regions[basic_free_region].unlink_region_front(); } else { const size_t LARGE_REGION_SIZE = global_region_allocator.get_large_region_alignment(); assert (size >= LARGE_REGION_SIZE); if (size == LARGE_REGION_SIZE) { // get it from the local list of large free regions if possible region = free_regions[large_free_region].unlink_region_front(); } else { // get it from the local list of huge free regions if possible region = free_regions[huge_free_region].unlink_smallest_region (size); if (region == nullptr) { ASSERT_HOLDING_SPIN_LOCK(&gc_lock); // get it from the global list of huge free regions region = global_free_huge_regions.unlink_smallest_region (size); } } } if (region) { uint8_t* region_start = get_region_start (region); uint8_t* region_end = heap_segment_reserved (region); init_heap_segment (region, __this, region_start, (region_end - region_start), gen_number); dprintf (REGIONS_LOG, ("h%d GFR get region %Ix (%Ix-%Ix) for gen%d", heap_number, (size_t)region, region_start, region_end, gen_number)); } else { // TODO: We should keep enough reserve in the free regions so we don't get OOM when // this is called within GC when we sweep. region = allocate_new_region (__this, gen_number, (gen_number > max_generation), size); } if (region) { if (!init_table_for_region (gen_number, region)) { region = 0; } } return region; } // Note that this gets the basic region index for obj. If the obj is in a large region, // this region may not be the start of it. 
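// For a single basic region the returned entry is the region's own heap_segment info;
// for a region made of several basic regions only the first basic region carries the
// real info, while the trailing basic regions store a small negative value (-i) in
// heap_segment_allocated (see init_heap_segment) that identifies them as the i-th
// continuation of the region they belong to.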
heap_segment* gc_heap::region_of (uint8_t* obj) { size_t index = (size_t)obj >> gc_heap::min_segment_size_shr; seg_mapping* entry = &seg_mapping_table[index]; return (heap_segment*)entry; } heap_segment* gc_heap::get_region_at_index (size_t index) { index += (size_t)g_gc_lowest_address >> gc_heap::min_segment_size_shr; return (heap_segment*)(&seg_mapping_table[index]); } // For debugging purposes to check that a region looks sane and // do some logging. This was useful to sprinkle in various places // where we were threading regions. void gc_heap::check_seg_gen_num (heap_segment* seg) { #ifdef _DEBUG uint8_t* mem = heap_segment_mem (seg); if ((mem < g_gc_lowest_address) || (mem >= g_gc_highest_address)) { GCToOSInterface::DebugBreak(); } int alloc_seg_gen_num = get_region_gen_num (mem); int alloc_seg_plan_gen_num = get_region_plan_gen_num (mem); dprintf (3, ("seg %Ix->%Ix, num %d, %d", (size_t)seg, mem, alloc_seg_gen_num, alloc_seg_plan_gen_num)); #endif //_DEBUG } int gc_heap::get_region_gen_num (heap_segment* region) { return heap_segment_gen_num (region); } int gc_heap::get_region_gen_num (uint8_t* obj) { return heap_segment_gen_num (region_of (obj)); } int gc_heap::get_region_plan_gen_num (uint8_t* obj) { return heap_segment_plan_gen_num (region_of (obj)); } bool gc_heap::is_region_demoted (uint8_t* obj) { return heap_segment_demoted_p (region_of (obj)); } inline void gc_heap::set_region_gen_num (heap_segment* region, int gen_num) { assert (gen_num < (1 << (sizeof (uint8_t) * 8))); assert (gen_num >= 0); heap_segment_gen_num (region) = (uint8_t)gen_num; } inline void gc_heap::set_region_plan_gen_num (heap_segment* region, int plan_gen_num) { int gen_num = heap_segment_gen_num (region); int supposed_plan_gen_num = get_plan_gen_num (gen_num); dprintf (REGIONS_LOG, ("h%d setting plan gen on %Ix->%Ix(was gen%d) to %d(should be: %d) %s", heap_number, (size_t)region, heap_segment_mem (region), gen_num, plan_gen_num, supposed_plan_gen_num, ((plan_gen_num < supposed_plan_gen_num) ? "DEMOTED" : "ND"))); if (plan_gen_num < supposed_plan_gen_num) { if (!settings.demotion) { settings.demotion = TRUE; } get_gc_data_per_heap()->set_mechanism_bit (gc_demotion_bit); region->flags |= heap_segment_flags_demoted; } else { region->flags &= ~heap_segment_flags_demoted; } heap_segment_plan_gen_num (region) = plan_gen_num; } inline void gc_heap::set_region_plan_gen_num_sip (heap_segment* region, int plan_gen_num) { if (!heap_segment_swept_in_plan (region)) { set_region_plan_gen_num (region, plan_gen_num); } } #endif //USE_REGIONS int gc_heap::get_plan_gen_num (int gen_number) { return ((settings.promotion) ? 
min ((gen_number + 1), max_generation) : gen_number); } uint8_t* gc_heap::get_uoh_start_object (heap_segment* region, generation* gen) { #ifdef USE_REGIONS uint8_t* o = heap_segment_mem (region); #else uint8_t* o = generation_allocation_start (gen); assert(((CObjectHeader*)o)->IsFree()); size_t s = Align (size (o), get_alignment_constant (FALSE)); assert (s == AlignQword (min_obj_size)); //Skip the generation gap object o += s; #endif //USE_REGIONS return o; } uint8_t* gc_heap::get_soh_start_object (heap_segment* region, generation* gen) { #ifdef USE_REGIONS uint8_t* o = heap_segment_mem (region); #else uint8_t* o = generation_allocation_start (gen); #endif //USE_REGIONS return o; } size_t gc_heap::get_soh_start_obj_len (uint8_t* start_obj) { #ifdef USE_REGIONS return 0; #else return Align (size (start_obj)); #endif //USE_REGIONS } void gc_heap::clear_gen1_cards() { #if defined(_DEBUG) && !defined(USE_REGIONS) for (int x = 0; x <= max_generation; x++) { assert (generation_allocation_start (generation_of (x))); } #endif //_DEBUG && !USE_REGIONS if (!settings.demotion && settings.promotion) { //clear card for generation 1. generation 0 is empty #ifdef USE_REGIONS heap_segment* region = generation_start_segment (generation_of (1)); while (region) { clear_card_for_addresses (heap_segment_mem (region), heap_segment_allocated (region)); region = heap_segment_next (region); } #else //USE_REGIONS clear_card_for_addresses ( generation_allocation_start (generation_of (1)), generation_allocation_start (generation_of (0))); #endif //USE_REGIONS #ifdef _DEBUG uint8_t* start = get_soh_start_object (ephemeral_heap_segment, youngest_generation); assert (heap_segment_allocated (ephemeral_heap_segment) == (start + get_soh_start_obj_len (start))); #endif //_DEBUG } } heap_segment* gc_heap::make_heap_segment (uint8_t* new_pages, size_t size, gc_heap* hp, int gen_num) { gc_oh_num oh = gen_to_oh (gen_num); size_t initial_commit = SEGMENT_INITIAL_COMMIT; int h_number = #ifdef MULTIPLE_HEAPS hp->heap_number; #else 0; #endif //MULTIPLE_HEAPS if (!virtual_commit (new_pages, initial_commit, oh, h_number)) { return 0; } #ifdef USE_REGIONS dprintf (REGIONS_LOG, ("Making region %Ix->%Ix(%Idmb)", new_pages, (new_pages + size), (size / 1024 / 1024))); heap_segment* new_segment = get_region_info (new_pages); uint8_t* start = new_pages + sizeof (aligned_plug_and_gap); #else heap_segment* new_segment = (heap_segment*)new_pages; uint8_t* start = new_pages + segment_info_size; #endif //USE_REGIONS heap_segment_mem (new_segment) = start; heap_segment_used (new_segment) = start; heap_segment_reserved (new_segment) = new_pages + size; heap_segment_committed (new_segment) = (use_large_pages_p ? 
heap_segment_reserved(new_segment) : (new_pages + initial_commit)); init_heap_segment (new_segment, hp #ifdef USE_REGIONS , new_pages, size, gen_num #endif //USE_REGIONS ); dprintf (2, ("Creating heap segment %Ix", (size_t)new_segment)); return new_segment; } void gc_heap::init_heap_segment (heap_segment* seg, gc_heap* hp #ifdef USE_REGIONS , uint8_t* start, size_t size, int gen_num #endif //USE_REGIONS ) { seg->flags = 0; heap_segment_next (seg) = 0; heap_segment_plan_allocated (seg) = heap_segment_mem (seg); heap_segment_allocated (seg) = heap_segment_mem (seg); heap_segment_saved_allocated (seg) = heap_segment_mem (seg); heap_segment_decommit_target (seg) = heap_segment_reserved (seg); #ifdef BACKGROUND_GC heap_segment_background_allocated (seg) = 0; heap_segment_saved_bg_allocated (seg) = 0; #endif //BACKGROUND_GC #ifdef MULTIPLE_HEAPS heap_segment_heap (seg) = hp; #endif //MULTIPLE_HEAPS #ifdef USE_REGIONS int gen_num_for_region = min (gen_num, max_generation); heap_segment_gen_num (seg) = (uint8_t)gen_num_for_region; heap_segment_plan_gen_num (seg) = gen_num_for_region; heap_segment_swept_in_plan (seg) = false; #endif //USE_REGIONS #ifdef USE_REGIONS int num_basic_regions = (int)(size >> min_segment_size_shr); size_t basic_region_size = (size_t)1 << min_segment_size_shr; dprintf (REGIONS_LOG, ("this region contains %d basic regions", num_basic_regions)); if (num_basic_regions > 1) { for (int i = 1; i < num_basic_regions; i++) { uint8_t* basic_region_start = start + (i * basic_region_size); heap_segment* basic_region = get_region_info (basic_region_start); heap_segment_allocated (basic_region) = (uint8_t*)(ptrdiff_t)-i; dprintf (REGIONS_LOG, ("Initing basic region %Ix->%Ix(%Idmb) alloc to %Ix", basic_region_start, (basic_region_start + basic_region_size), (size_t)(basic_region_size / 1024 / 1024), heap_segment_allocated (basic_region))); heap_segment_gen_num (basic_region) = (uint8_t)gen_num_for_region; heap_segment_plan_gen_num (basic_region) = gen_num_for_region; #ifdef MULTIPLE_HEAPS heap_segment_heap (basic_region) = hp; #endif //MULTIPLE_HEAPS } } #endif //USE_REGIONS } //Releases the segment to the OS. // this is always called on one thread only so calling seg_table->remove is fine. void gc_heap::delete_heap_segment (heap_segment* seg, BOOL consider_hoarding) { if (!heap_segment_uoh_p (seg)) { //cleanup the brick table back to the empty value clear_brick_table (heap_segment_mem (seg), heap_segment_reserved (seg)); } #ifdef USE_REGIONS return_free_region (seg); #else // USE_REGIONS if (consider_hoarding) { assert ((heap_segment_mem (seg) - (uint8_t*)seg) <= ptrdiff_t(2*OS_PAGE_SIZE)); size_t ss = (size_t) (heap_segment_reserved (seg) - (uint8_t*)seg); //Don't keep the big ones. if (ss <= INITIAL_ALLOC) { dprintf (2, ("Hoarding segment %Ix", (size_t)seg)); #ifdef BACKGROUND_GC // We don't need to clear the decommitted flag because when this segment is used // for a new segment the flags will be cleared. 
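            // The segment is decommitted here (unless background GC already decommitted it)
            // and parked on segment_standby_list rather than released, so a later segment
            // request can reuse the existing reservation instead of reserving new address space.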
if (!heap_segment_decommitted_p (seg)) #endif //BACKGROUND_GC { decommit_heap_segment (seg); } seg_mapping_table_remove_segment (seg); heap_segment_next (seg) = segment_standby_list; segment_standby_list = seg; seg = 0; } } if (seg != 0) { dprintf (2, ("h%d: del seg: [%Ix, %Ix[", heap_number, (size_t)seg, (size_t)(heap_segment_reserved (seg)))); #ifdef BACKGROUND_GC ::record_changed_seg ((uint8_t*)seg, heap_segment_reserved (seg), settings.gc_index, current_bgc_state, seg_deleted); decommit_mark_array_by_seg (seg); #endif //BACKGROUND_GC seg_mapping_table_remove_segment (seg); release_segment (seg); } #endif //USE_REGIONS } //resets the pages beyond allocates size so they won't be swapped out and back in void gc_heap::reset_heap_segment_pages (heap_segment* seg) { size_t page_start = align_on_page ((size_t)heap_segment_allocated (seg)); size_t size = (size_t)heap_segment_committed (seg) - page_start; if (size != 0) GCToOSInterface::VirtualReset((void*)page_start, size, false /* unlock */); } void gc_heap::decommit_heap_segment_pages (heap_segment* seg, size_t extra_space) { if (use_large_pages_p) return; uint8_t* page_start = align_on_page (heap_segment_allocated(seg)); assert (heap_segment_committed (seg) >= page_start); size_t size = heap_segment_committed (seg) - page_start; extra_space = align_on_page (extra_space); if (size >= max ((extra_space + 2*OS_PAGE_SIZE), MIN_DECOMMIT_SIZE)) { page_start += max(extra_space, 32*OS_PAGE_SIZE); decommit_heap_segment_pages_worker (seg, page_start); } } size_t gc_heap::decommit_heap_segment_pages_worker (heap_segment* seg, uint8_t* new_committed) { assert (!use_large_pages_p); uint8_t* page_start = align_on_page (new_committed); ptrdiff_t size = heap_segment_committed (seg) - page_start; if (size > 0) { bool decommit_succeeded_p = virtual_decommit (page_start, (size_t)size, heap_segment_oh (seg), heap_number); if (decommit_succeeded_p) { dprintf (3, ("Decommitting heap segment [%Ix, %Ix[(%d)", (size_t)page_start, (size_t)(page_start + size), size)); heap_segment_committed (seg) = page_start; if (heap_segment_used (seg) > heap_segment_committed (seg)) { heap_segment_used (seg) = heap_segment_committed (seg); } } else { dprintf (3, ("Decommitting heap segment failed")); } } return size; } //decommit all pages except one or 2 void gc_heap::decommit_heap_segment (heap_segment* seg) { #ifdef USE_REGIONS if (!dt_high_memory_load_p()) { return; } #endif uint8_t* page_start = align_on_page (heap_segment_mem (seg)); dprintf (3, ("Decommitting heap segment %Ix(%Ix)", (size_t)seg, heap_segment_mem (seg))); #if defined(BACKGROUND_GC) && !defined(USE_REGIONS) page_start += OS_PAGE_SIZE; #endif //BACKGROUND_GC && !USE_REGIONS assert (heap_segment_committed (seg) >= page_start); size_t size = heap_segment_committed (seg) - page_start; bool decommit_succeeded_p = virtual_decommit (page_start, size, heap_segment_oh (seg), heap_number); if (decommit_succeeded_p) { //re-init the segment object heap_segment_committed (seg) = page_start; if (heap_segment_used (seg) > heap_segment_committed (seg)) { heap_segment_used (seg) = heap_segment_committed (seg); } } } void gc_heap::clear_gen0_bricks() { if (!gen0_bricks_cleared) { gen0_bricks_cleared = TRUE; //initialize brick table for gen 0 #ifdef USE_REGIONS heap_segment* gen0_region = generation_start_segment (generation_of (0)); while (gen0_region) { uint8_t* clear_start = heap_segment_mem (gen0_region); #else heap_segment* gen0_region = ephemeral_heap_segment; uint8_t* clear_start = generation_allocation_start 
(generation_of (0)); { #endif //USE_REGIONS for (size_t b = brick_of (clear_start); b < brick_of (align_on_brick (heap_segment_allocated (gen0_region))); b++) { set_brick (b, -1); } #ifdef USE_REGIONS gen0_region = heap_segment_next (gen0_region); #endif //USE_REGIONS } } } void gc_heap::check_gen0_bricks() { //#ifdef _DEBUG if (gen0_bricks_cleared) { #ifdef USE_REGIONS heap_segment* gen0_region = generation_start_segment (generation_of (0)); while (gen0_region) { uint8_t* start = heap_segment_mem (gen0_region); #else heap_segment* gen0_region = ephemeral_heap_segment; uint8_t* start = generation_allocation_start (generation_of (0)); { #endif //USE_REGIONS size_t end_b = brick_of (heap_segment_allocated (gen0_region)); for (size_t b = brick_of (start); b < end_b; b++) { assert (brick_table[b] != 0); if (brick_table[b] == 0) { GCToOSInterface::DebugBreak(); } } #ifdef USE_REGIONS gen0_region = heap_segment_next (gen0_region); #endif //USE_REGIONS } } //#endif //_DEBUG } #ifdef BACKGROUND_GC void gc_heap::rearrange_small_heap_segments() { heap_segment* seg = freeable_soh_segment; while (seg) { heap_segment* next_seg = heap_segment_next (seg); // TODO: we need to consider hoarding here. delete_heap_segment (seg, FALSE); seg = next_seg; } freeable_soh_segment = 0; } #endif //BACKGROUND_GC void gc_heap::rearrange_uoh_segments() { dprintf (2, ("deleting empty large segments")); heap_segment* seg = freeable_uoh_segment; while (seg) { heap_segment* next_seg = heap_segment_next (seg); delete_heap_segment (seg, GCConfig::GetRetainVM()); seg = next_seg; } freeable_uoh_segment = 0; } #ifndef USE_REGIONS void gc_heap::rearrange_heap_segments(BOOL compacting) { heap_segment* seg = generation_start_segment (generation_of (max_generation)); heap_segment* prev_seg = 0; heap_segment* next_seg = 0; while (seg) { next_seg = heap_segment_next (seg); //link ephemeral segment when expanding if ((next_seg == 0) && (seg != ephemeral_heap_segment)) { seg->next = ephemeral_heap_segment; next_seg = heap_segment_next (seg); } //re-used expanded heap segment if ((seg == ephemeral_heap_segment) && next_seg) { heap_segment_next (prev_seg) = next_seg; heap_segment_next (seg) = 0; } else { uint8_t* end_segment = (compacting ? heap_segment_plan_allocated (seg) : heap_segment_allocated (seg)); // check if the segment was reached by allocation if ((end_segment == heap_segment_mem (seg))&& !heap_segment_read_only_p (seg)) { //if not, unthread and delete assert (prev_seg); assert (seg != ephemeral_heap_segment); heap_segment_next (prev_seg) = next_seg; delete_heap_segment (seg, GCConfig::GetRetainVM()); dprintf (2, ("Deleting heap segment %Ix", (size_t)seg)); } else { if (!heap_segment_read_only_p (seg)) { if (compacting) { heap_segment_allocated (seg) = heap_segment_plan_allocated (seg); } // reset the pages between allocated and committed. 
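                // decommit_heap_segment_pages (defined earlier) only acts when the committed
                // tail beyond heap_segment_allocated is at least
                // max(extra_space + 2 pages, MIN_DECOMMIT_SIZE), and it leaves
                // max(extra_space, 32 OS pages) of slack committed, so modest growth after
                // this GC does not immediately force a re-commit. The ephemeral segment is
                // skipped, presumably because it is about to be allocated into again.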
if (seg != ephemeral_heap_segment) { decommit_heap_segment_pages (seg, 0); } } prev_seg = seg; } } seg = next_seg; } } #endif //!USE_REGIONS #if defined(USE_REGIONS) // trim down the list of free regions pointed at by free_list down to target_count, moving the extra ones to surplus_list static void remove_surplus_regions (region_free_list* free_list, region_free_list* surplus_list, size_t target_count) { while (free_list->get_num_free_regions() > target_count) { // remove one region from the heap's free list heap_segment* region = free_list->unlink_region_front(); // and put it on the surplus list surplus_list->add_region_front (region); } } // add regions from surplus_list to free_list, trying to reach target_count static int64_t add_regions (region_free_list* free_list, region_free_list* surplus_list, size_t target_count) { int64_t added_count = 0; while (free_list->get_num_free_regions() < target_count) { if (surplus_list->get_num_free_regions() == 0) break; added_count++; // remove one region from the surplus list heap_segment* region = surplus_list->unlink_region_front(); // and put it on the heap's free list free_list->add_region_front (region); } return added_count; } region_free_list::region_free_list() : num_free_regions (0), size_free_regions (0), size_committed_in_free_regions (0), num_free_regions_added (0), num_free_regions_removed (0), head_free_region (nullptr), tail_free_region (nullptr) { } void region_free_list::verify (bool empty_p) { #ifdef _DEBUG assert ((num_free_regions == 0) == empty_p); assert ((size_free_regions == 0) == empty_p); assert ((size_committed_in_free_regions == 0) == empty_p); assert ((head_free_region == nullptr) == empty_p); assert ((tail_free_region == nullptr) == empty_p); assert (num_free_regions == (num_free_regions_added - num_free_regions_removed)); if (!empty_p) { assert (heap_segment_next (tail_free_region) == nullptr); assert (heap_segment_prev_free_region (head_free_region) == nullptr); size_t actual_count = 0; heap_segment* last_region = nullptr; for (heap_segment* region = head_free_region; region != nullptr; region = heap_segment_next(region)) { last_region = region; actual_count++; } assert (num_free_regions == actual_count); assert (last_region == tail_free_region); heap_segment* first_region = nullptr; for (heap_segment* region = tail_free_region; region != nullptr; region = heap_segment_prev_free_region(region)) { first_region = region; actual_count--; } assert (actual_count == 0); assert (head_free_region == first_region); } #endif } void region_free_list::reset() { num_free_regions = 0; size_free_regions = 0; size_committed_in_free_regions = 0; head_free_region = nullptr; tail_free_region = nullptr; } inline void region_free_list::update_added_region_info (heap_segment* region) { num_free_regions++; num_free_regions_added++; size_t region_size = get_region_size (region); size_free_regions += region_size; size_t region_committed_size = get_region_committed_size (region); size_committed_in_free_regions += region_committed_size; verify (false); } void region_free_list::add_region_front (heap_segment* region) { assert (heap_segment_containing_free_list (region) == nullptr); heap_segment_containing_free_list(region) = this; if (head_free_region != nullptr) { heap_segment_prev_free_region(head_free_region) = region; assert (tail_free_region != nullptr); } else { tail_free_region = region; } heap_segment_next (region) = head_free_region; head_free_region = region; heap_segment_prev_free_region (region) = nullptr; update_added_region_info 
(region); } // This inserts fully committed regions at the head, otherwise it goes backward in the list till // we find a region whose committed size is >= this region's committed or we reach the head. void region_free_list::add_region_in_descending_order (heap_segment* region_to_add) { assert (heap_segment_containing_free_list (region_to_add) == nullptr); heap_segment_containing_free_list (region_to_add) = this; heap_segment_age_in_free (region_to_add) = 0; heap_segment* prev_region = nullptr; heap_segment* region = nullptr; // if the region is fully committed, it's inserted at the front if (heap_segment_committed (region_to_add) == heap_segment_reserved (region_to_add)) { region = head_free_region; } else { // otherwise we search backwards for a good insertion spot // most regions at the front are fully committed and thus boring to search size_t region_to_add_committed = get_region_committed_size (region_to_add); for (prev_region = tail_free_region; prev_region != nullptr; prev_region = heap_segment_prev_free_region (prev_region)) { size_t prev_region_committed = get_region_committed_size (prev_region); if (prev_region_committed >= region_to_add_committed) { break; } region = prev_region; } } if (prev_region != nullptr) { heap_segment_next (prev_region) = region_to_add; } else { assert (region == head_free_region); head_free_region = region_to_add; } heap_segment_prev_free_region (region_to_add) = prev_region; heap_segment_next (region_to_add) = region; if (region != nullptr) { heap_segment_prev_free_region (region) = region_to_add; } else { assert (prev_region == tail_free_region); tail_free_region = region_to_add; } update_added_region_info (region_to_add); } heap_segment* region_free_list::unlink_region_front() { heap_segment* region = head_free_region; if (region != nullptr) { assert (heap_segment_containing_free_list (region) == this); unlink_region (region); } return region; } void region_free_list::unlink_region (heap_segment* region) { region_free_list* rfl = heap_segment_containing_free_list (region); rfl->verify (false); heap_segment* prev = heap_segment_prev_free_region (region); heap_segment* next = heap_segment_next (region); if (prev != nullptr) { assert (region != rfl->head_free_region); assert (heap_segment_next (prev) == region); heap_segment_next (prev) = next; } else { assert (region == rfl->head_free_region); rfl->head_free_region = next; } if (next != nullptr) { assert (region != rfl->tail_free_region); assert (heap_segment_prev_free_region (next) == region); heap_segment_prev_free_region (next) = prev; } else { assert (region == rfl->tail_free_region); rfl->tail_free_region = prev; } heap_segment_containing_free_list (region) = nullptr; rfl->num_free_regions--; rfl->num_free_regions_removed++; size_t region_size = get_region_size (region); assert (rfl->size_free_regions >= region_size); rfl->size_free_regions -= region_size; size_t region_committed_size = get_region_committed_size (region); assert (rfl->size_committed_in_free_regions >= region_committed_size); rfl->size_committed_in_free_regions -= region_committed_size; } free_region_kind region_free_list::get_region_kind (heap_segment* region) { const size_t BASIC_REGION_SIZE = global_region_allocator.get_region_alignment(); const size_t LARGE_REGION_SIZE = global_region_allocator.get_large_region_alignment(); size_t region_size = get_region_size (region); if (region_size == BASIC_REGION_SIZE) return basic_free_region; else if (region_size == LARGE_REGION_SIZE) return large_free_region; else { assert(region_size > 
LARGE_REGION_SIZE); return huge_free_region; } } heap_segment* region_free_list::unlink_smallest_region (size_t minimum_size) { verify (num_free_regions == 0); // look for the smallest region that is large enough heap_segment* smallest_region = nullptr; size_t smallest_size = (size_t)-1; for (heap_segment* region = head_free_region; region != nullptr; region = heap_segment_next (region)) { uint8_t* region_start = get_region_start(region); uint8_t* region_end = heap_segment_reserved(region); size_t region_size = get_region_size (region); const size_t LARGE_REGION_SIZE = global_region_allocator.get_large_region_alignment(); assert (region_size >= LARGE_REGION_SIZE * 2); if (region_size >= minimum_size) { // found a region that is large enough - see if it's smaller than the smallest so far if (smallest_size > region_size) { smallest_size = region_size; smallest_region = region; } // is the region's size equal to the minimum on this list? if (region_size == LARGE_REGION_SIZE * 2) { // we won't find a smaller one on this list assert (region == smallest_region); break; } } } if (smallest_region != nullptr) { unlink_region (smallest_region); dprintf(REGIONS_LOG, ("get %Ix-%Ix-%Ix", heap_segment_mem(smallest_region), heap_segment_committed(smallest_region), heap_segment_used(smallest_region))); } return smallest_region; } void region_free_list::transfer_regions (region_free_list* from) { this->verify (this->num_free_regions == 0); from->verify (from->num_free_regions == 0); if (from->num_free_regions == 0) { // the from list is empty return; } if (num_free_regions == 0) { // this list is empty head_free_region = from->head_free_region; tail_free_region = from->tail_free_region; } else { // both free lists are non-empty // attach the from list at the tail heap_segment* this_tail = tail_free_region; heap_segment* from_head = from->head_free_region; heap_segment_next (this_tail) = from_head; heap_segment_prev_free_region (from_head) = this_tail; tail_free_region = from->tail_free_region; } for (heap_segment* region = from->head_free_region; region != nullptr; region = heap_segment_next (region)) { heap_segment_containing_free_list (region) = this; } num_free_regions += from->num_free_regions; num_free_regions_added += from->num_free_regions; size_free_regions += from->size_free_regions; size_committed_in_free_regions += from->size_committed_in_free_regions; from->num_free_regions_removed += from->num_free_regions; from->reset(); verify (false); } size_t region_free_list::get_num_free_regions() { #ifdef _DEBUG verify (num_free_regions == 0); #endif //_DEBUG return num_free_regions; } void region_free_list::add_region (heap_segment* region, region_free_list to_free_list[count_free_region_kinds]) { free_region_kind kind = get_region_kind (region); to_free_list[kind].add_region_front (region); } void region_free_list::add_region_descending (heap_segment* region, region_free_list to_free_list[count_free_region_kinds]) { free_region_kind kind = get_region_kind (region); to_free_list[kind].add_region_in_descending_order (region); } void region_free_list::age_free_regions() { for (heap_segment* region = head_free_region; region != nullptr; region = heap_segment_next (region)) { // only age to 99... that's enough for us to decommit this. 
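        // The age saturates at MAX_AGE_IN_FREE; distribute_free_regions (further down)
        // treats any region whose age has reached AGE_IN_FREE_TO_DECOMMIT as unused for
        // long enough and moves it to global_regions_to_decommit, so the age is in effect
        // a count of how many aging passes the region has sat on a free list without
        // being reused.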
if (heap_segment_age_in_free (region) < MAX_AGE_IN_FREE) heap_segment_age_in_free (region)++; } } void region_free_list::age_free_regions (region_free_list free_lists[count_free_region_kinds]) { for (int kind = basic_free_region; kind < count_free_region_kinds; kind++) { free_lists[kind].age_free_regions(); } } void region_free_list::print (int hn, const char* msg, int* ages) { dprintf (3, ("h%2d PRINTING-------------------------------", hn)); for (heap_segment* region = head_free_region; region != nullptr; region = heap_segment_next (region)) { if (ages) { ages[heap_segment_age_in_free (region)]++; } dprintf (3, ("[%s] h%2d age %d region %Ix (%Id)%s", msg, hn, (int)heap_segment_age_in_free (region), heap_segment_mem (region), get_region_committed_size (region), ((heap_segment_committed (region) == heap_segment_reserved (region)) ? "(FC)" : ""))); } dprintf (3, ("h%2d PRINTING END-------------------------------", hn)); } void region_free_list::print (region_free_list free_lists[count_free_region_kinds], int hn, const char* msg, int* ages) { for (int kind = basic_free_region; kind < count_free_region_kinds; kind++) { free_lists[kind].print (hn, msg, ages); } } static int compare_by_committed_and_age (heap_segment* l, heap_segment* r) { size_t l_committed = get_region_committed_size (l); size_t r_committed = get_region_committed_size (r); if (l_committed > r_committed) return -1; else if (l_committed < r_committed) return 1; int l_age = heap_segment_age_in_free (l); int r_age = heap_segment_age_in_free (r); return (l_age - r_age); } static heap_segment* merge_sort_by_committed_and_age (heap_segment *head, size_t count) { if (count <= 1) return head; size_t half = count / 2; heap_segment* mid = nullptr; size_t i = 0; for (heap_segment *region = head; region != nullptr; region = heap_segment_next (region)) { i++; if (i == half) { mid = heap_segment_next (region); heap_segment_next (region) = nullptr; break; } } head = merge_sort_by_committed_and_age (head, half); mid = merge_sort_by_committed_and_age (mid, count - half); heap_segment* new_head; if (compare_by_committed_and_age (head, mid) <= 0) { new_head = head; head = heap_segment_next (head); } else { new_head = mid; mid = heap_segment_next (mid); } heap_segment* new_tail = new_head; while ((head != nullptr) && (mid != nullptr)) { heap_segment* region = nullptr; if (compare_by_committed_and_age (head, mid) <= 0) { region = head; head = heap_segment_next (head); } else { region = mid; mid = heap_segment_next (mid); } heap_segment_next (new_tail) = region; new_tail = region; } if (head != nullptr) { assert (mid == nullptr); heap_segment_next (new_tail) = head; } else { heap_segment_next (new_tail) = mid; } return new_head; } void region_free_list::sort_by_committed_and_age() { if (num_free_regions <= 1) return; heap_segment* new_head = merge_sort_by_committed_and_age (head_free_region, num_free_regions); // need to set head, tail, and all the prev links again head_free_region = new_head; heap_segment* prev = nullptr; for (heap_segment* region = new_head; region != nullptr; region = heap_segment_next (region)) { heap_segment_prev_free_region (region) = prev; assert ((prev == nullptr) || (compare_by_committed_and_age (prev, region) <= 0)); prev = region; } tail_free_region = prev; } #endif //USE_REGIONS void gc_heap::distribute_free_regions() { #ifdef USE_REGIONS const int kind_count = large_free_region + 1; // first step: accumulate the number of free regions and the budget over all heaps // and move huge regions to global free list size_t 
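// the per-heap budget below is estimate_gen_growth (clamped at 0) rounded up to whole
// regions; soh generations count against the basic region kind, loh/poh against the
// large kind. these totals decide how many free regions each heap gets to keep.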
total_num_free_regions[kind_count] = { 0, 0 }; size_t total_budget_in_region_units[kind_count] = { 0, 0 }; size_t num_decommit_regions_by_time = 0; size_t size_decommit_regions_by_time = 0; size_t heap_budget_in_region_units[MAX_SUPPORTED_CPUS][kind_count]; size_t region_size[kind_count] = { global_region_allocator.get_region_alignment(), global_region_allocator.get_large_region_alignment() }; region_free_list surplus_regions[kind_count]; for (int kind = basic_free_region; kind < kind_count; kind++) { // we may still have regions left on the regions_to_decommit list - // use these to fill the budget as well surplus_regions[kind].transfer_regions (&global_regions_to_decommit[kind]); } #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { gc_heap* hp = g_heaps[i]; #else //MULTIPLE_HEAPS { gc_heap* hp = pGenGCHeap; // just to reduce the number of #ifdefs in the code below const int i = 0; #endif //MULTIPLE_HEAPS for (int kind = basic_free_region; kind < kind_count; kind++) { // If there are regions in free that haven't been used in AGE_IN_FREE_TO_DECOMMIT GCs we always decommit them. region_free_list& region_list = hp->free_regions[kind]; heap_segment* next_region = nullptr; for (heap_segment* region = region_list.get_first_free_region(); region != nullptr; region = next_region) { next_region = heap_segment_next (region); if (heap_segment_age_in_free (region) >= AGE_IN_FREE_TO_DECOMMIT) { num_decommit_regions_by_time++; size_decommit_regions_by_time += get_region_committed_size (region); dprintf (REGIONS_LOG, ("h%2d region %Ix age %2d, decommit", i, heap_segment_mem (region), heap_segment_age_in_free (region))); region_free_list::unlink_region (region); region_free_list::add_region (region, global_regions_to_decommit); } } total_num_free_regions[kind] += region_list.get_num_free_regions(); } global_free_huge_regions.transfer_regions (&hp->free_regions[huge_free_region]); heap_budget_in_region_units[i][basic_free_region] = 0; heap_budget_in_region_units[i][large_free_region] = 0; for (int gen = soh_gen0; gen < total_generation_count; gen++) { ptrdiff_t budget_gen = max (hp->estimate_gen_growth (gen), 0); int kind = gen >= loh_generation; size_t budget_gen_in_region_units = (budget_gen + (region_size[kind] - 1)) / region_size[kind]; dprintf (REGIONS_LOG, ("h%2d gen %d has an estimated growth of %Id bytes (%Id regions)", i, gen, budget_gen, budget_gen_in_region_units)); heap_budget_in_region_units[i][kind] += budget_gen_in_region_units; total_budget_in_region_units[kind] += budget_gen_in_region_units; } } dprintf (1, ("moved %2d regions (%8Id) to decommit based on time", num_decommit_regions_by_time, size_decommit_regions_by_time)); global_free_huge_regions.transfer_regions (&global_regions_to_decommit[huge_free_region]); size_t free_space_in_huge_regions = global_free_huge_regions.get_size_free_regions(); ptrdiff_t num_regions_to_decommit[kind_count]; int region_factor[kind_count] = { 1, LARGE_REGION_FACTOR }; #ifdef TRACE_GC const char* kind_name[count_free_region_kinds] = { "basic", "large", "huge"}; #endif // TRACE_GC #ifndef MULTIPLE_HEAPS // just to reduce the number of #ifdefs in the code below const int n_heaps = 1; #endif //!MULTIPLE_HEAPS size_t num_huge_region_units_to_consider[kind_count] = { 0, free_space_in_huge_regions / region_size[large_free_region] }; for (int kind = basic_free_region; kind < kind_count; kind++) { num_regions_to_decommit[kind] = surplus_regions[kind].get_num_free_regions(); dprintf(REGIONS_LOG, ("%Id %s free regions, %Id regions budget, %Id regions on 
decommit list, %Id huge regions to consider", total_num_free_regions[kind], kind_name[kind], total_budget_in_region_units[kind], num_regions_to_decommit[kind], num_huge_region_units_to_consider[kind])); // check if the free regions exceed the budget // if so, put the highest free regions on the decommit list total_num_free_regions[kind] += num_regions_to_decommit[kind]; ptrdiff_t balance = total_num_free_regions[kind] + num_huge_region_units_to_consider[kind] - total_budget_in_region_units[kind]; if ( #ifdef BACKGROUND_GC background_running_p() || #endif (balance < 0)) { dprintf (REGIONS_LOG, ("distributing the %Id %s regions deficit", -balance, kind_name[kind])); // we may have a deficit or - if background GC is going on - a surplus. // adjust the budget per heap accordingly ptrdiff_t adjustment_per_heap = (balance + (n_heaps - 1)) / n_heaps; #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { ptrdiff_t new_budget = (ptrdiff_t)heap_budget_in_region_units[i][kind] + adjustment_per_heap; heap_budget_in_region_units[i][kind] = max (0, new_budget); } #endif //MULTIPLE_HEAPS } else { num_regions_to_decommit[kind] = balance; dprintf(REGIONS_LOG, ("distributing the %Id %s regions, removing %Id regions", total_budget_in_region_units[kind], kind_name[kind], num_regions_to_decommit[kind])); if (num_regions_to_decommit[kind] > 0) { // put the highest regions on the decommit list global_region_allocator.move_highest_free_regions (num_regions_to_decommit[kind]*region_factor[kind], kind == basic_free_region, global_regions_to_decommit); dprintf (REGIONS_LOG, ("Moved %Id %s regions to decommit list", global_regions_to_decommit[kind].get_num_free_regions(), kind_name[kind])); if (kind == basic_free_region) { assert (global_regions_to_decommit[kind].get_num_free_regions() == (size_t)num_regions_to_decommit[kind]); } else { dprintf (REGIONS_LOG, ("Moved %Id %s regions to decommit list", global_regions_to_decommit[huge_free_region].get_num_free_regions(), kind_name[huge_free_region])); // cannot assert we moved any regions because there may be a single huge region with more than we want to decommit } } } } for (int kind = basic_free_region; kind < kind_count; kind++) { #ifdef MULTIPLE_HEAPS // now go through all the heaps and remove any free regions above the target count for (int i = 0; i < n_heaps; i++) { gc_heap* hp = g_heaps[i]; if (hp->free_regions[kind].get_num_free_regions() > heap_budget_in_region_units[i][kind]) { dprintf (REGIONS_LOG, ("removing %Id %s regions from heap %d with %Id regions", hp->free_regions[kind].get_num_free_regions() - heap_budget_in_region_units[i][kind], kind_name[kind], i, hp->free_regions[kind].get_num_free_regions())); remove_surplus_regions (&hp->free_regions[kind], &surplus_regions[kind], heap_budget_in_region_units[i][kind]); } } // finally go through all the heaps and distribute any surplus regions to heaps having too few free regions for (int i = 0; i < n_heaps; i++) { gc_heap* hp = g_heaps[i]; #else //MULTIPLE_HEAPS { gc_heap* hp = pGenGCHeap; const int i = 0; #endif //MULTIPLE_HEAPS if (hp->free_regions[kind].get_num_free_regions() < heap_budget_in_region_units[i][kind]) { int64_t num_added_regions = add_regions (&hp->free_regions[kind], &surplus_regions[kind], heap_budget_in_region_units[i][kind]); dprintf (REGIONS_LOG, ("added %Id %s regions to heap %d - now has %Id", num_added_regions, kind_name[kind], i, hp->free_regions[kind].get_num_free_regions())); } hp->free_regions[kind].sort_by_committed_and_age(); } if (surplus_regions[kind].get_num_free_regions() 
> 0) { assert (!"should have exhausted the surplus_regions"); global_regions_to_decommit[kind].transfer_regions (&surplus_regions[kind]); } } #ifdef MULTIPLE_HEAPS for (int kind = basic_free_region; kind < count_free_region_kinds; kind++) { if (global_regions_to_decommit[kind].get_num_free_regions() != 0) { gradual_decommit_in_progress_p = TRUE; break; } } #else //MULTIPLE_HEAPS while (decommit_step()) { } #endif //MULTIPLE_HEAPS #endif //USE_REGIONS } #ifdef WRITE_WATCH uint8_t* g_addresses [array_size+2]; // to get around the bug in GetWriteWatch #ifdef CARD_BUNDLE inline void gc_heap::verify_card_bundle_bits_set(size_t first_card_word, size_t last_card_word) { #ifdef _DEBUG for (size_t x = cardw_card_bundle (first_card_word); x < cardw_card_bundle (last_card_word); x++) { if (!card_bundle_set_p (x)) { assert (!"Card bundle not set"); dprintf (3, ("Card bundle %Ix not set", x)); } } #else UNREFERENCED_PARAMETER(first_card_word); UNREFERENCED_PARAMETER(last_card_word); #endif } // Verifies that any bundles that are not set represent only cards that are not set. inline void gc_heap::verify_card_bundles() { #ifdef _DEBUG size_t lowest_card = card_word (card_of (lowest_address)); #ifdef USE_REGIONS size_t highest_card = card_word (card_of (global_region_allocator.get_left_used_unsafe())); #else size_t highest_card = card_word (card_of (highest_address)); #endif size_t cardb = cardw_card_bundle (lowest_card); size_t end_cardb = cardw_card_bundle (align_cardw_on_bundle (highest_card)); while (cardb < end_cardb) { uint32_t* card_word = &card_table[max(card_bundle_cardw (cardb), lowest_card)]; uint32_t* card_word_end = &card_table[min(card_bundle_cardw (cardb+1), highest_card)]; if (card_bundle_set_p (cardb) == 0) { // Verify that no card is set while (card_word < card_word_end) { if (*card_word != 0) { dprintf (3, ("gc: %d, Card word %Ix for address %Ix set, card_bundle %Ix clear", dd_collection_count (dynamic_data_of (0)), (size_t)(card_word-&card_table[0]), (size_t)(card_address ((size_t)(card_word-&card_table[0]) * card_word_width)), cardb)); } assert((*card_word)==0); card_word++; } } cardb++; } #endif } // If card bundles are enabled, use write watch to find pages in the card table that have // been dirtied, and set the corresponding card bundle bits. 
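// the loop below asks GetWriteWatch for the card table's own dirty pages, translates
// each dirty OS page back into the range of card words it covers, and sets the card
// bundle bits spanning that range (with a debug-only check that the bits really got set).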
void gc_heap::update_card_table_bundle() { if (card_bundles_enabled()) { // The address of the card word containing the card representing the lowest heap address uint8_t* base_address = (uint8_t*)(&card_table[card_word (card_of (lowest_address))]); // The address of the card word containing the card representing the highest heap address #ifdef USE_REGIONS uint8_t* high_address = (uint8_t*)(&card_table[card_word (card_of (global_region_allocator.get_left_used_unsafe()))]); #else uint8_t* high_address = (uint8_t*)(&card_table[card_word (card_of (highest_address))]); #endif //USE_REGIONS uint8_t* saved_base_address = base_address; uintptr_t bcount = array_size; size_t saved_region_size = align_on_page (high_address) - saved_base_address; do { size_t region_size = align_on_page (high_address) - base_address; dprintf (3,("Probing card table pages [%Ix, %Ix[", (size_t)base_address, (size_t)(base_address + region_size))); bool success = GCToOSInterface::GetWriteWatch(false /* resetState */, base_address, region_size, (void**)g_addresses, &bcount); assert (success && "GetWriteWatch failed!"); dprintf (3,("Found %d pages written", bcount)); for (unsigned i = 0; i < bcount; i++) { // Offset of the dirty page from the start of the card table (clamped to base_address) size_t bcardw = (uint32_t*)(max(g_addresses[i],base_address)) - &card_table[0]; // Offset of the end of the page from the start of the card table (clamped to high addr) size_t ecardw = (uint32_t*)(min(g_addresses[i]+OS_PAGE_SIZE, high_address)) - &card_table[0]; assert (bcardw >= card_word (card_of (g_gc_lowest_address))); // Set the card bundle bits representing the dirty card table page card_bundles_set (cardw_card_bundle (bcardw), cardw_card_bundle (align_cardw_on_bundle (ecardw))); dprintf (3,("Set Card bundle [%Ix, %Ix[", cardw_card_bundle (bcardw), cardw_card_bundle (align_cardw_on_bundle (ecardw)))); verify_card_bundle_bits_set(bcardw, ecardw); } if (bcount >= array_size) { base_address = g_addresses [array_size-1] + OS_PAGE_SIZE; bcount = array_size; } } while ((bcount >= array_size) && (base_address < high_address)); // Now that we've updated the card bundle bits, reset the write-tracking state. 
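// the reset covers exactly the range probed above ([saved_base_address,
// saved_base_address + saved_region_size)), so the next update only sees card table
// pages dirtied after this point.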
GCToOSInterface::ResetWriteWatch (saved_base_address, saved_region_size); } } #endif //CARD_BUNDLE #ifdef BACKGROUND_GC // static void gc_heap::reset_write_watch_for_gc_heap(void* base_address, size_t region_size) { #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP SoftwareWriteWatch::ClearDirty(base_address, region_size); #else // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP GCToOSInterface::ResetWriteWatch(base_address, region_size); #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP } // static void gc_heap::get_write_watch_for_gc_heap(bool reset, void *base_address, size_t region_size, void** dirty_pages, uintptr_t* dirty_page_count_ref, bool is_runtime_suspended) { #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP SoftwareWriteWatch::GetDirty(base_address, region_size, dirty_pages, dirty_page_count_ref, reset, is_runtime_suspended); #else // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP UNREFERENCED_PARAMETER(is_runtime_suspended); bool success = GCToOSInterface::GetWriteWatch(reset, base_address, region_size, dirty_pages, dirty_page_count_ref); assert(success); #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP } const size_t ww_reset_quantum = 128*1024*1024; inline void gc_heap::switch_one_quantum() { enable_preemptive (); GCToOSInterface::Sleep (1); disable_preemptive (true); } void gc_heap::reset_ww_by_chunk (uint8_t* start_address, size_t total_reset_size) { size_t reset_size = 0; size_t remaining_reset_size = 0; size_t next_reset_size = 0; while (reset_size != total_reset_size) { remaining_reset_size = total_reset_size - reset_size; next_reset_size = ((remaining_reset_size >= ww_reset_quantum) ? ww_reset_quantum : remaining_reset_size); if (next_reset_size) { reset_write_watch_for_gc_heap(start_address, next_reset_size); reset_size += next_reset_size; switch_one_quantum(); } } assert (reset_size == total_reset_size); } // This does a Sleep(1) for every reset ww_reset_quantum bytes of reset // we do concurrently. void gc_heap::switch_on_reset (BOOL concurrent_p, size_t* current_total_reset_size, size_t last_reset_size) { if (concurrent_p) { *current_total_reset_size += last_reset_size; dprintf (2, ("reset %Id bytes so far", *current_total_reset_size)); if (*current_total_reset_size > ww_reset_quantum) { switch_one_quantum(); *current_total_reset_size = 0; } } } void gc_heap::reset_write_watch (BOOL concurrent_p) { #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP // Software write watch currently requires the runtime to be suspended during reset. // See SoftwareWriteWatch::ClearDirty(). assert(!concurrent_p); #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP dprintf (2, ("bgc lowest: %Ix, bgc highest: %Ix", background_saved_lowest_address, background_saved_highest_address)); size_t reset_size = 0; for (int i = get_start_generation_index(); i < total_generation_count; i++) { heap_segment* seg = heap_segment_rw (generation_start_segment (generation_of (i))); while (seg) { uint8_t* base_address = align_lower_page (heap_segment_mem (seg)); base_address = max (base_address, background_saved_lowest_address); uint8_t* high_address = ((seg == ephemeral_heap_segment) ? 
alloc_allocated : heap_segment_allocated (seg)); high_address = min (high_address, background_saved_highest_address); if (base_address < high_address) { size_t reset_size = 0; size_t region_size = high_address - base_address; dprintf (3, ("h%d, gen: %Ix, ww: [%Ix(%Id)", heap_number, i, (size_t)base_address, region_size)); //reset_ww_by_chunk (base_address, region_size); reset_write_watch_for_gc_heap(base_address, region_size); switch_on_reset (concurrent_p, &reset_size, region_size); } seg = heap_segment_next_rw (seg); concurrent_print_time_delta (i == max_generation ? "CRWW soh": "CRWW uoh"); } } } #endif //BACKGROUND_GC #endif //WRITE_WATCH #ifdef BACKGROUND_GC void gc_heap::restart_vm() { //assert (generation_allocation_pointer (youngest_generation) == 0); dprintf (3, ("Restarting EE")); STRESS_LOG0(LF_GC, LL_INFO10000, "Concurrent GC: Restarting EE\n"); ee_proceed_event.Set(); } inline void fire_alloc_wait_event (alloc_wait_reason awr, BOOL begin_p) { if (awr != awr_ignored) { if (begin_p) { FIRE_EVENT(BGCAllocWaitBegin, awr); } else { FIRE_EVENT(BGCAllocWaitEnd, awr); } } } void gc_heap::fire_alloc_wait_event_begin (alloc_wait_reason awr) { fire_alloc_wait_event (awr, TRUE); } void gc_heap::fire_alloc_wait_event_end (alloc_wait_reason awr) { fire_alloc_wait_event (awr, FALSE); } #endif //BACKGROUND_GC void gc_heap::make_generation (int gen_num, heap_segment* seg, uint8_t* start) { generation* gen = generation_of (gen_num); gen->gen_num = gen_num; #ifndef USE_REGIONS gen->allocation_start = start; gen->plan_allocation_start = 0; #endif //USE_REGIONS gen->allocation_context.alloc_ptr = 0; gen->allocation_context.alloc_limit = 0; gen->allocation_context.alloc_bytes = 0; gen->allocation_context.alloc_bytes_uoh = 0; gen->allocation_context_start_region = 0; gen->start_segment = seg; #ifdef USE_REGIONS dprintf (REGIONS_LOG, ("g%d start seg is %Ix-%Ix", gen_num, (size_t)seg, heap_segment_mem (seg))); gen->tail_region = seg; gen->plan_start_segment = 0; gen->tail_ro_region = 0; #endif //USE_REGIONS gen->allocation_segment = seg; gen->free_list_space = 0; gen->pinned_allocated = 0; gen->free_list_allocated = 0; gen->end_seg_allocated = 0; gen->condemned_allocated = 0; gen->sweep_allocated = 0; gen->free_obj_space = 0; gen->allocation_size = 0; gen->pinned_allocation_sweep_size = 0; gen->pinned_allocation_compact_size = 0; gen->allocate_end_seg_p = FALSE; gen->free_list_allocator.clear(); #ifdef DOUBLY_LINKED_FL gen->set_bgc_mark_bit_p = FALSE; #endif //DOUBLY_LINKED_FL #ifdef FREE_USAGE_STATS memset (gen->gen_free_spaces, 0, sizeof (gen->gen_free_spaces)); memset (gen->gen_current_pinned_free_spaces, 0, sizeof (gen->gen_current_pinned_free_spaces)); memset (gen->gen_plugs, 0, sizeof (gen->gen_plugs)); #endif //FREE_USAGE_STATS } void gc_heap::adjust_ephemeral_limits () { #ifndef USE_REGIONS ephemeral_low = generation_allocation_start (generation_of (max_generation - 1)); ephemeral_high = heap_segment_reserved (ephemeral_heap_segment); dprintf (3, ("new ephemeral low: %Ix new ephemeral high: %Ix", (size_t)ephemeral_low, (size_t)ephemeral_high)) #ifndef MULTIPLE_HEAPS // This updates the write barrier helpers with the new info. 
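// (this path is only compiled when neither USE_REGIONS nor MULTIPLE_HEAPS is defined;
// in those configurations the barrier is instead initialized with the full [1, ~0)
// range - see stomp_write_barrier_initialize in init_gc_heap)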
stomp_write_barrier_ephemeral(ephemeral_low, ephemeral_high); #endif // MULTIPLE_HEAPS #endif //USE_REGIONS } #if defined(TRACE_GC) || defined(GC_CONFIG_DRIVEN) FILE* CreateLogFile(const GCConfigStringHolder& temp_logfile_name, bool is_config) { FILE* logFile; if (!temp_logfile_name.Get()) { return nullptr; } char logfile_name[MAX_LONGPATH+1]; //uint32_t pid = GCToOSInterface::GetCurrentProcessId(); const char* suffix = is_config ? ".config.log" : ".log"; //_snprintf_s(logfile_name, MAX_LONGPATH+1, _TRUNCATE, "%s.%d%s", temp_logfile_name.Get(), pid, suffix); _snprintf_s(logfile_name, MAX_LONGPATH+1, _TRUNCATE, "%s%s", temp_logfile_name.Get(), suffix); logFile = fopen(logfile_name, "wb"); return logFile; } #endif //TRACE_GC || GC_CONFIG_DRIVEN size_t gc_heap::get_segment_size_hard_limit (uint32_t* num_heaps, bool should_adjust_num_heaps) { assert (heap_hard_limit); size_t aligned_hard_limit = align_on_segment_hard_limit (heap_hard_limit); if (should_adjust_num_heaps) { uint32_t max_num_heaps = (uint32_t)(aligned_hard_limit / min_segment_size_hard_limit); if (*num_heaps > max_num_heaps) { *num_heaps = max_num_heaps; } } size_t seg_size = aligned_hard_limit / *num_heaps; size_t aligned_seg_size = (use_large_pages_p ? align_on_segment_hard_limit (seg_size) : round_up_power2 (seg_size)); assert (g_theGCHeap->IsValidSegmentSize (aligned_seg_size)); size_t seg_size_from_config = (size_t)GCConfig::GetSegmentSize(); if (seg_size_from_config) { size_t aligned_seg_size_config = (use_large_pages_p ? align_on_segment_hard_limit (seg_size) : round_up_power2 (seg_size_from_config)); aligned_seg_size = max (aligned_seg_size, aligned_seg_size_config); } //printf ("limit: %Idmb, aligned: %Idmb, %d heaps, seg size from config: %Idmb, seg size %Idmb", // (heap_hard_limit / 1024 / 1024), // (aligned_hard_limit / 1024 / 1024), // *num_heaps, // (seg_size_from_config / 1024 / 1024), // (aligned_seg_size / 1024 / 1024)); return aligned_seg_size; } #ifdef USE_REGIONS bool allocate_initial_regions(int number_of_heaps) { initial_regions = new (nothrow) uint8_t*[number_of_heaps][total_generation_count][2]; if (initial_regions == nullptr) { return false; } for (int i = 0; i < number_of_heaps; i++) { bool succeed = global_region_allocator.allocate_large_region( &initial_regions[i][poh_generation][0], &initial_regions[i][poh_generation][1], allocate_forward, 0, nullptr); assert(succeed); } for (int i = 0; i < number_of_heaps; i++) { for (int gen = max_generation; gen >= 0; gen--) { bool succeed = global_region_allocator.allocate_basic_region( &initial_regions[i][gen][0], &initial_regions[i][gen][1], nullptr); assert(succeed); } } for (int i = 0; i < number_of_heaps; i++) { bool succeed = global_region_allocator.allocate_large_region( &initial_regions[i][loh_generation][0], &initial_regions[i][loh_generation][1], allocate_forward, 0, nullptr); assert(succeed); } return true; } #endif HRESULT gc_heap::initialize_gc (size_t soh_segment_size, size_t loh_segment_size, size_t poh_segment_size #ifdef MULTIPLE_HEAPS ,int number_of_heaps #endif //MULTIPLE_HEAPS ) { #ifdef TRACE_GC if (GCConfig::GetLogEnabled()) { gc_log = CreateLogFile(GCConfig::GetLogFile(), false); if (gc_log == NULL) return E_FAIL; // GCLogFileSize in MBs. 
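// values outside (0, 500] make logging setup fail below; max_gc_buffers is derived
// from this size once the log buffer has been allocated.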
gc_log_file_size = static_cast<size_t>(GCConfig::GetLogFileSize()); if (gc_log_file_size <= 0 || gc_log_file_size > 500) { fclose (gc_log); return E_FAIL; } gc_log_lock.Initialize(); gc_log_buffer = new (nothrow) uint8_t [gc_log_buffer_size]; if (!gc_log_buffer) { fclose(gc_log); return E_FAIL; } memset (gc_log_buffer, '*', gc_log_buffer_size); max_gc_buffers = gc_log_file_size * 1024 * 1024 / gc_log_buffer_size; } #endif // TRACE_GC #ifdef GC_CONFIG_DRIVEN if (GCConfig::GetConfigLogEnabled()) { gc_config_log = CreateLogFile(GCConfig::GetConfigLogFile(), true); if (gc_config_log == NULL) return E_FAIL; gc_config_log_buffer = new (nothrow) uint8_t [gc_config_log_buffer_size]; if (!gc_config_log_buffer) { fclose(gc_config_log); return E_FAIL; } compact_ratio = static_cast<int>(GCConfig::GetCompactRatio()); // h# | GC | gen | C | EX | NF | BF | ML | DM || PreS | PostS | Merge | Conv | Pre | Post | PrPo | PreP | PostP | cprintf (("%2s | %6s | %1s | %1s | %2s | %2s | %2s | %2s | %2s || %5s | %5s | %5s | %5s | %5s | %5s | %5s | %5s | %5s |", "h#", // heap index "GC", // GC index "g", // generation "C", // compaction (empty means sweeping), 'M' means it was mandatory, 'W' means it was not "EX", // heap expansion "NF", // normal fit "BF", // best fit (if it indicates neither NF nor BF it means it had to acquire a new seg. "ML", // mark list "DM", // demotion "PreS", // short object before pinned plug "PostS", // short object after pinned plug "Merge", // merged pinned plugs "Conv", // converted to pinned plug "Pre", // plug before pinned plug but not after "Post", // plug after pinned plug but not before "PrPo", // plug both before and after pinned plug "PreP", // pre short object padded "PostP" // post short object padded )); } #endif //GC_CONFIG_DRIVEN HRESULT hres = S_OK; #ifdef WRITE_WATCH hardware_write_watch_api_supported(); #ifdef BACKGROUND_GC if (can_use_write_watch_for_gc_heap() && GCConfig::GetConcurrentGC()) { gc_can_use_concurrent = true; #ifndef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP virtual_alloc_hardware_write_watch = true; #endif // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP } else { gc_can_use_concurrent = false; } #endif //BACKGROUND_GC #endif //WRITE_WATCH #ifdef BACKGROUND_GC // leave the first page to contain only segment info // because otherwise we could need to revisit the first page frequently in // background GC. segment_info_size = OS_PAGE_SIZE; #else segment_info_size = Align (sizeof (heap_segment), get_alignment_constant (FALSE)); #endif //BACKGROUND_GC reserved_memory = 0; size_t initial_heap_size = soh_segment_size + loh_segment_size + poh_segment_size; uint16_t* heap_no_to_numa_node = nullptr; #ifdef MULTIPLE_HEAPS reserved_memory_limit = initial_heap_size * number_of_heaps; if (!heap_select::init(number_of_heaps)) return E_OUTOFMEMORY; if (GCToOSInterface::CanEnableGCNumaAware()) heap_no_to_numa_node = heap_select::heap_no_to_numa_node; #else //MULTIPLE_HEAPS reserved_memory_limit = initial_heap_size; int number_of_heaps = 1; #endif //MULTIPLE_HEAPS if (heap_hard_limit) { check_commit_cs.Initialize(); } #ifdef USE_REGIONS if (regions_range) { // REGIONS TODO: we should reserve enough space at the end of what we reserved that's // big enough to accommodate if we were to materialize all the GC bookkeeping datastructures. // We only need to commit what we use and just need to commit more instead of having to // relocate the exising table and then calling copy_brick_card_table. 
// Right now all the non mark array portions are commmitted since I'm calling mark_card_table // on the whole range. This can be committed as needed. size_t reserve_size = regions_range; uint8_t* reserve_range = (uint8_t*)virtual_alloc (reserve_size, use_large_pages_p); if (!reserve_range) return E_OUTOFMEMORY; if (!global_region_allocator.init (reserve_range, (reserve_range + reserve_size), ((size_t)1 << min_segment_size_shr), &g_gc_lowest_address, &g_gc_highest_address)) return E_OUTOFMEMORY; bookkeeping_covered_start = global_region_allocator.get_start(); if (!allocate_initial_regions(number_of_heaps)) return E_OUTOFMEMORY; } else { assert (!"cannot use regions without specifying the range!!!"); return E_FAIL; } #else //USE_REGIONS bool separated_poh_p = use_large_pages_p && heap_hard_limit_oh[soh] && (GCConfig::GetGCHeapHardLimitPOH() == 0) && (GCConfig::GetGCHeapHardLimitPOHPercent() == 0); if (!reserve_initial_memory (soh_segment_size, loh_segment_size, poh_segment_size, number_of_heaps, use_large_pages_p, separated_poh_p, heap_no_to_numa_node)) return E_OUTOFMEMORY; if (separated_poh_p) { heap_hard_limit_oh[poh] = min_segment_size_hard_limit * number_of_heaps; heap_hard_limit += heap_hard_limit_oh[poh]; } #endif //USE_REGIONS #ifdef CARD_BUNDLE //check if we need to turn on card_bundles. #ifdef MULTIPLE_HEAPS // use INT64 arithmetic here because of possible overflow on 32p uint64_t th = (uint64_t)MH_TH_CARD_BUNDLE*number_of_heaps; #else // use INT64 arithmetic here because of possible overflow on 32p uint64_t th = (uint64_t)SH_TH_CARD_BUNDLE; #endif //MULTIPLE_HEAPS if (can_use_write_watch_for_card_table() && reserved_memory >= th) { settings.card_bundles = TRUE; } else { settings.card_bundles = FALSE; } #endif //CARD_BUNDLE settings.first_init(); int latency_level_from_config = static_cast<int>(GCConfig::GetLatencyLevel()); if (latency_level_from_config >= latency_level_first && latency_level_from_config <= latency_level_last) { gc_heap::latency_level = static_cast<gc_latency_level>(latency_level_from_config); } init_static_data(); g_gc_card_table = make_card_table (g_gc_lowest_address, g_gc_highest_address); if (!g_gc_card_table) return E_OUTOFMEMORY; gc_started = FALSE; #ifdef MULTIPLE_HEAPS g_heaps = new (nothrow) gc_heap* [number_of_heaps]; if (!g_heaps) return E_OUTOFMEMORY; #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable:22011) // Suppress PREFast warning about integer underflow/overflow #endif // _PREFAST_ #if !defined(USE_REGIONS) || defined(_DEBUG) g_promoted = new (nothrow) size_t [number_of_heaps*16]; if (!g_promoted) return E_OUTOFMEMORY; #endif //!USE_REGIONS || _DEBUG #ifdef BACKGROUND_GC g_bpromoted = new (nothrow) size_t [number_of_heaps*16]; if (!g_bpromoted) return E_OUTOFMEMORY; #endif #ifdef MH_SC_MARK g_mark_stack_busy = new (nothrow) int[(number_of_heaps+2)*HS_CACHE_LINE_SIZE/sizeof(int)]; #endif //MH_SC_MARK #ifdef _PREFAST_ #pragma warning(pop) #endif // _PREFAST_ #ifdef MH_SC_MARK if (!g_mark_stack_busy) return E_OUTOFMEMORY; #endif //MH_SC_MARK if (!create_thread_support (number_of_heaps)) return E_OUTOFMEMORY; #endif //MULTIPLE_HEAPS #ifdef MULTIPLE_HEAPS yp_spin_count_unit = 32 * number_of_heaps; #else yp_spin_count_unit = 32 * g_num_processors; #endif //MULTIPLE_HEAPS #if defined(__linux__) GCToEEInterface::UpdateGCEventStatus(static_cast<int>(GCEventStatus::GetEnabledLevel(GCEventProvider_Default)), static_cast<int>(GCEventStatus::GetEnabledKeywords(GCEventProvider_Default)), 
static_cast<int>(GCEventStatus::GetEnabledLevel(GCEventProvider_Private)), static_cast<int>(GCEventStatus::GetEnabledKeywords(GCEventProvider_Private))); #endif // __linux__ #ifdef USE_VXSORT InitSupportedInstructionSet ((int32_t)GCConfig::GetGCEnabledInstructionSets()); #endif if (!init_semi_shared()) { hres = E_FAIL; } return hres; } //Initializes PER_HEAP_ISOLATED data members. int gc_heap::init_semi_shared() { int ret = 0; #ifdef BGC_SERVO_TUNING uint32_t current_memory_load = 0; uint32_t sweep_flr_goal = 0; uint32_t sweep_flr_goal_loh = 0; #endif //BGC_SERVO_TUNING // This is used for heap expansion - it's to fix exactly the start for gen 0 // through (max_generation-1). When we expand the heap we allocate all these // gen starts at the beginning of the new ephemeral seg. eph_gen_starts_size = (Align (min_obj_size)) * max_generation; #ifdef MULTIPLE_HEAPS mark_list_size = min (100*1024, max (8192, soh_segment_size/(2*10*32))); g_mark_list = make_mark_list (mark_list_size*n_heaps); min_balance_threshold = alloc_quantum_balance_units * CLR_SIZE * 2; g_mark_list_copy = make_mark_list (mark_list_size*n_heaps); if (!g_mark_list_copy) { goto cleanup; } #else //MULTIPLE_HEAPS mark_list_size = max (8192, soh_segment_size/(64*32)); g_mark_list = make_mark_list (mark_list_size); #endif //MULTIPLE_HEAPS dprintf (3, ("mark_list_size: %d", mark_list_size)); if (!g_mark_list) { goto cleanup; } #ifdef MULTIPLE_HEAPS // gradual decommit: set size to some reasonable value per time interval max_decommit_step_size = ((DECOMMIT_SIZE_PER_MILLISECOND * DECOMMIT_TIME_STEP_MILLISECONDS) / n_heaps); // but do at least MIN_DECOMMIT_SIZE per step to make the OS call worthwhile max_decommit_step_size = max (max_decommit_step_size, MIN_DECOMMIT_SIZE); #endif //MULTIPLE_HEAPS #ifdef FEATURE_BASICFREEZE seg_table = sorted_table::make_sorted_table(); if (!seg_table) goto cleanup; #endif //FEATURE_BASICFREEZE segment_standby_list = 0; if (!full_gc_approach_event.CreateManualEventNoThrow(FALSE)) { goto cleanup; } if (!full_gc_end_event.CreateManualEventNoThrow(FALSE)) { goto cleanup; } fgn_loh_percent = 0; full_gc_approach_event_set = false; memset (full_gc_counts, 0, sizeof (full_gc_counts)); memset (&last_ephemeral_gc_info, 0, sizeof (last_ephemeral_gc_info)); memset (&last_full_blocking_gc_info, 0, sizeof (last_full_blocking_gc_info)); #ifdef BACKGROUND_GC memset (&last_bgc_info, 0, sizeof (last_bgc_info)); #endif //BACKGROUND_GC should_expand_in_full_gc = FALSE; #ifdef FEATURE_LOH_COMPACTION loh_compaction_always_p = GCConfig::GetLOHCompactionMode() != 0; loh_compaction_mode = loh_compaction_default; #endif //FEATURE_LOH_COMPACTION loh_size_threshold = (size_t)GCConfig::GetLOHThreshold(); assert (loh_size_threshold >= LARGE_OBJECT_SIZE); #ifdef BGC_SERVO_TUNING memset (bgc_tuning::gen_calc, 0, sizeof (bgc_tuning::gen_calc)); memset (bgc_tuning::gen_stats, 0, sizeof (bgc_tuning::gen_stats)); memset (bgc_tuning::current_bgc_end_data, 0, sizeof (bgc_tuning::current_bgc_end_data)); // for the outer loop - the ML (memory load) loop bgc_tuning::enable_fl_tuning = (GCConfig::GetBGCFLTuningEnabled() != 0); bgc_tuning::memory_load_goal = (uint32_t)GCConfig::GetBGCMemGoal(); bgc_tuning::memory_load_goal_slack = (uint32_t)GCConfig::GetBGCMemGoalSlack(); bgc_tuning::ml_kp = (double)GCConfig::GetBGCMLkp() / 1000.0; bgc_tuning::ml_ki = (double)GCConfig::GetBGCMLki() / 1000.0; bgc_tuning::ratio_correction_step = (double)GCConfig::GetBGCG2RatioStep() / 100.0; // for the inner loop - the alloc loop which calculates the allocated 
bytes in gen2 before // triggering the next BGC. bgc_tuning::above_goal_kp = (double)GCConfig::GetBGCFLkp() / 1000000.0; bgc_tuning::enable_ki = (GCConfig::GetBGCFLEnableKi() != 0); bgc_tuning::above_goal_ki = (double)GCConfig::GetBGCFLki() / 1000000.0; bgc_tuning::enable_kd = (GCConfig::GetBGCFLEnableKd() != 0); bgc_tuning::above_goal_kd = (double)GCConfig::GetBGCFLkd() / 100.0; bgc_tuning::enable_smooth = (GCConfig::GetBGCFLEnableSmooth() != 0); bgc_tuning::num_gen1s_smooth_factor = (double)GCConfig::GetBGCFLSmoothFactor() / 100.0; bgc_tuning::enable_tbh = (GCConfig::GetBGCFLEnableTBH() != 0); bgc_tuning::enable_ff = (GCConfig::GetBGCFLEnableFF() != 0); bgc_tuning::above_goal_ff = (double)GCConfig::GetBGCFLff() / 100.0; bgc_tuning::enable_gradual_d = (GCConfig::GetBGCFLGradualD() != 0); sweep_flr_goal = (uint32_t)GCConfig::GetBGCFLSweepGoal(); sweep_flr_goal_loh = (uint32_t)GCConfig::GetBGCFLSweepGoalLOH(); bgc_tuning::gen_calc[0].sweep_flr_goal = ((sweep_flr_goal == 0) ? 20.0 : (double)sweep_flr_goal); bgc_tuning::gen_calc[1].sweep_flr_goal = ((sweep_flr_goal_loh == 0) ? 20.0 : (double)sweep_flr_goal_loh); bgc_tuning::available_memory_goal = (uint64_t)((double)gc_heap::total_physical_mem * (double)(100 - bgc_tuning::memory_load_goal) / 100); get_memory_info (&current_memory_load); dprintf (BGC_TUNING_LOG, ("BTL tuning %s!!!", (bgc_tuning::enable_fl_tuning ? "enabled" : "disabled"))); #ifdef SIMPLE_DPRINTF dprintf (BGC_TUNING_LOG, ("BTL tuning parameters: mem goal: %d%%(%I64d), +/-%d%%, gen2 correction factor: %.2f, sweep flr goal: %d%%, smooth factor: %.3f(%s), TBH: %s, FF: %.3f(%s), ml: kp %.5f, ki %.10f", bgc_tuning::memory_load_goal, bgc_tuning::available_memory_goal, bgc_tuning::memory_load_goal_slack, bgc_tuning::ratio_correction_step, (int)bgc_tuning::gen_calc[0].sweep_flr_goal, bgc_tuning::num_gen1s_smooth_factor, (bgc_tuning::enable_smooth ? "enabled" : "disabled"), (bgc_tuning::enable_tbh ? "enabled" : "disabled"), bgc_tuning::above_goal_ff, (bgc_tuning::enable_ff ? "enabled" : "disabled"), bgc_tuning::ml_kp, bgc_tuning::ml_ki)); dprintf (BGC_TUNING_LOG, ("BTL tuning parameters: kp: %.5f, ki: %.5f (%s), kd: %.3f (kd-%s, gd-%s), ff: %.3f", bgc_tuning::above_goal_kp, bgc_tuning::above_goal_ki, (bgc_tuning::enable_ki ? "enabled" : "disabled"), bgc_tuning::above_goal_kd, (bgc_tuning::enable_kd ? "enabled" : "disabled"), (bgc_tuning::enable_gradual_d ? 
"enabled" : "disabled"), bgc_tuning::above_goal_ff)); #endif //SIMPLE_DPRINTF if (bgc_tuning::enable_fl_tuning && (current_memory_load < bgc_tuning::memory_load_goal)) { uint32_t distance_to_goal = bgc_tuning::memory_load_goal - current_memory_load; bgc_tuning::stepping_interval = max (distance_to_goal / 10, 1); bgc_tuning::last_stepping_mem_load = current_memory_load; bgc_tuning::last_stepping_bgc_count = 0; dprintf (BGC_TUNING_LOG, ("current ml: %d, %d to goal, interval: %d", current_memory_load, distance_to_goal, bgc_tuning::stepping_interval)); } else { dprintf (BGC_TUNING_LOG, ("current ml: %d, >= goal: %d, disable stepping", current_memory_load, bgc_tuning::memory_load_goal)); bgc_tuning::use_stepping_trigger_p = false; } #endif //BGC_SERVO_TUNING #ifdef BACKGROUND_GC memset (ephemeral_fgc_counts, 0, sizeof (ephemeral_fgc_counts)); bgc_alloc_spin_count = static_cast<uint32_t>(GCConfig::GetBGCSpinCount()); bgc_alloc_spin = static_cast<uint32_t>(GCConfig::GetBGCSpin()); { int number_bgc_threads = get_num_heaps(); if (!create_bgc_threads_support (number_bgc_threads)) { goto cleanup; } } #endif //BACKGROUND_GC memset (&current_no_gc_region_info, 0, sizeof (current_no_gc_region_info)); #ifdef GC_CONFIG_DRIVEN compact_or_sweep_gcs[0] = 0; compact_or_sweep_gcs[1] = 0; #endif //GC_CONFIG_DRIVEN #ifdef SHORT_PLUGS short_plugs_pad_ratio = (double)DESIRED_PLUG_LENGTH / (double)(DESIRED_PLUG_LENGTH - Align (min_obj_size)); #endif //SHORT_PLUGS generation_skip_ratio_threshold = (int)GCConfig::GetGCLowSkipRatio(); #ifdef FEATURE_EVENT_TRACE gc_time_info = new (nothrow) uint64_t[max_compact_time_type]; if (!gc_time_info) { goto cleanup; } #ifdef BACKGROUND_GC bgc_time_info = new (nothrow) uint64_t[max_bgc_time_type]; if (!bgc_time_info) { goto cleanup; } #endif //BACKGROUND_GC #ifdef FEATURE_LOH_COMPACTION loh_compact_info = new (nothrow) etw_loh_compact_info [get_num_heaps()]; if (!loh_compact_info) { goto cleanup; } #endif //FEATURE_LOH_COMPACTION #endif //FEATURE_EVENT_TRACE conserve_mem_setting = (int)GCConfig::GetGCConserveMem(); if (conserve_mem_setting < 0) conserve_mem_setting = 0; if (conserve_mem_setting > 9) conserve_mem_setting = 9; dprintf (1, ("conserve_mem_setting = %d", conserve_mem_setting)); ret = 1; cleanup: if (!ret) { if (full_gc_approach_event.IsValid()) { full_gc_approach_event.CloseEvent(); } if (full_gc_end_event.IsValid()) { full_gc_end_event.CloseEvent(); } } return ret; } gc_heap* gc_heap::make_gc_heap ( #ifdef MULTIPLE_HEAPS GCHeap* vm_hp, int heap_number #endif //MULTIPLE_HEAPS ) { gc_heap* res = 0; #ifdef MULTIPLE_HEAPS res = new (nothrow) gc_heap; if (!res) return 0; res->vm_heap = vm_hp; res->alloc_context_count = 0; #ifndef USE_REGIONS res->mark_list_piece_start = new (nothrow) uint8_t**[n_heaps]; if (!res->mark_list_piece_start) return 0; #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable:22011) // Suppress PREFast warning about integer underflow/overflow #endif // _PREFAST_ res->mark_list_piece_end = new (nothrow) uint8_t**[n_heaps + 32]; // +32 is padding to reduce false sharing #ifdef _PREFAST_ #pragma warning(pop) #endif // _PREFAST_ if (!res->mark_list_piece_end) return 0; #endif //!USE_REGIONS #endif //MULTIPLE_HEAPS if (res->init_gc_heap ( #ifdef MULTIPLE_HEAPS heap_number #else //MULTIPLE_HEAPS 0 #endif //MULTIPLE_HEAPS )==0) { return 0; } #ifdef MULTIPLE_HEAPS return res; #else return (gc_heap*)1; #endif //MULTIPLE_HEAPS } uint32_t gc_heap::wait_for_gc_done(int32_t timeOut) { bool cooperative_mode = enable_preemptive (); uint32_t dwWaitResult 
= NOERROR; gc_heap* wait_heap = NULL; while (gc_heap::gc_started) { #ifdef MULTIPLE_HEAPS wait_heap = GCHeap::GetHeap(heap_select::select_heap(NULL))->pGenGCHeap; dprintf(2, ("waiting for the gc_done_event on heap %d", wait_heap->heap_number)); #endif // MULTIPLE_HEAPS #ifdef _PREFAST_ PREFIX_ASSUME(wait_heap != NULL); #endif // _PREFAST_ dwWaitResult = wait_heap->gc_done_event.Wait(timeOut, FALSE); } disable_preemptive (cooperative_mode); return dwWaitResult; } void gc_heap::set_gc_done() { enter_gc_done_event_lock(); if (!gc_done_event_set) { gc_done_event_set = true; dprintf (2, ("heap %d: setting gc_done_event", heap_number)); gc_done_event.Set(); } exit_gc_done_event_lock(); } void gc_heap::reset_gc_done() { enter_gc_done_event_lock(); if (gc_done_event_set) { gc_done_event_set = false; dprintf (2, ("heap %d: resetting gc_done_event", heap_number)); gc_done_event.Reset(); } exit_gc_done_event_lock(); } void gc_heap::enter_gc_done_event_lock() { uint32_t dwSwitchCount = 0; retry: if (Interlocked::CompareExchange(&gc_done_event_lock, 0, -1) >= 0) { while (gc_done_event_lock >= 0) { if (g_num_processors > 1) { int spin_count = yp_spin_count_unit; for (int j = 0; j < spin_count; j++) { if (gc_done_event_lock < 0) break; YieldProcessor(); // indicate to the processor that we are spinning } if (gc_done_event_lock >= 0) GCToOSInterface::YieldThread(++dwSwitchCount); } else GCToOSInterface::YieldThread(++dwSwitchCount); } goto retry; } } void gc_heap::exit_gc_done_event_lock() { gc_done_event_lock = -1; } #ifndef MULTIPLE_HEAPS #ifdef RECORD_LOH_STATE int gc_heap::loh_state_index = 0; gc_heap::loh_state_info gc_heap::last_loh_states[max_saved_loh_states]; #endif //RECORD_LOH_STATE VOLATILE(int32_t) gc_heap::gc_done_event_lock; VOLATILE(bool) gc_heap::gc_done_event_set; GCEvent gc_heap::gc_done_event; #endif //!MULTIPLE_HEAPS VOLATILE(bool) gc_heap::internal_gc_done; void gc_heap::add_saved_spinlock_info ( bool loh_p, msl_enter_state enter_state, msl_take_state take_state) { #ifdef SPINLOCK_HISTORY spinlock_info* current = &last_spinlock_info[spinlock_info_index]; current->enter_state = enter_state; current->take_state = take_state; current->thread_id.SetToCurrentThread(); current->loh_p = loh_p; dprintf (SPINLOCK_LOG, ("[%d]%s %s %s", heap_number, (loh_p ? "loh" : "soh"), ((enter_state == me_acquire) ? "E" : "L"), msl_take_state_str[take_state])); spinlock_info_index++; assert (spinlock_info_index <= max_saved_spinlock_info); if (spinlock_info_index >= max_saved_spinlock_info) { spinlock_info_index = 0; } #else UNREFERENCED_PARAMETER(enter_state); UNREFERENCED_PARAMETER(take_state); #endif //SPINLOCK_HISTORY } int gc_heap::init_gc_heap (int h_number) { #ifdef MULTIPLE_HEAPS time_bgc_last = 0; for (int oh_index = 0; oh_index < (gc_oh_num::total_oh_count - 1); oh_index++) allocated_since_last_gc[oh_index] = 0; #ifdef SPINLOCK_HISTORY spinlock_info_index = 0; memset (last_spinlock_info, 0, sizeof(last_spinlock_info)); #endif //SPINLOCK_HISTORY // initialize per heap members. 
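// (under MULTIPLE_HEAPS these are per-heap fields, initialized here for each heap
// instance that make_gc_heap creates)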
#ifndef USE_REGIONS ephemeral_low = (uint8_t*)1; ephemeral_high = MAX_PTR; #endif //!USE_REGIONS gc_low = 0; gc_high = 0; ephemeral_heap_segment = 0; oomhist_index_per_heap = 0; freeable_uoh_segment = 0; condemned_generation_num = 0; blocking_collection = FALSE; generation_skip_ratio = 100; #ifdef FEATURE_CARD_MARKING_STEALING n_eph_soh = 0; n_gen_soh = 0; n_eph_loh = 0; n_gen_loh = 0; #endif //FEATURE_CARD_MARKING_STEALING mark_stack_tos = 0; mark_stack_bos = 0; mark_stack_array_length = 0; mark_stack_array = 0; #if defined (_DEBUG) && defined (VERIFY_HEAP) verify_pinned_queue_p = FALSE; #endif // _DEBUG && VERIFY_HEAP #ifdef FEATURE_LOH_COMPACTION loh_pinned_queue_tos = 0; loh_pinned_queue_bos = 0; loh_pinned_queue_length = 0; loh_pinned_queue_decay = LOH_PIN_DECAY; loh_pinned_queue = 0; #endif //FEATURE_LOH_COMPACTION min_overflow_address = MAX_PTR; max_overflow_address = 0; gen0_bricks_cleared = FALSE; gen0_must_clear_bricks = 0; allocation_quantum = CLR_SIZE; more_space_lock_soh = gc_lock; more_space_lock_uoh = gc_lock; ro_segments_in_range = FALSE; loh_alloc_since_cg = 0; new_heap_segment = NULL; gen0_allocated_after_gc_p = false; #ifdef RECORD_LOH_STATE loh_state_index = 0; #endif //RECORD_LOH_STATE #endif //MULTIPLE_HEAPS #ifdef MULTIPLE_HEAPS if (h_number > n_heaps) { assert (!"Number of heaps exceeded"); return 0; } heap_number = h_number; #endif //MULTIPLE_HEAPS memset (&oom_info, 0, sizeof (oom_info)); memset (&fgm_result, 0, sizeof (fgm_result)); memset (oomhist_per_heap, 0, sizeof (oomhist_per_heap)); if (!gc_done_event.CreateManualEventNoThrow(FALSE)) { return 0; } gc_done_event_lock = -1; gc_done_event_set = false; if (!init_dynamic_data()) { return 0; } uint32_t* ct = &g_gc_card_table [card_word (card_of (g_gc_lowest_address))]; own_card_table (ct); card_table = translate_card_table (ct); brick_table = card_table_brick_table (ct); highest_address = card_table_highest_address (ct); lowest_address = card_table_lowest_address (ct); #ifdef CARD_BUNDLE card_bundle_table = translate_card_bundle_table (card_table_card_bundle_table (ct), g_gc_lowest_address); assert (&card_bundle_table [card_bundle_word (cardw_card_bundle (card_word (card_of (g_gc_lowest_address))))] == card_table_card_bundle_table (ct)); #endif //CARD_BUNDLE #ifdef BACKGROUND_GC if (gc_can_use_concurrent) mark_array = translate_mark_array (card_table_mark_array (&g_gc_card_table[card_word (card_of (g_gc_lowest_address))])); else mark_array = NULL; #endif //BACKGROUND_GC #ifdef USE_REGIONS #ifdef STRESS_REGIONS // Handle table APIs expect coop so we temporarily switch to coop. disable_preemptive (true); pinning_handles_for_alloc = new (nothrow) (OBJECTHANDLE[PINNING_HANDLE_INITIAL_LENGTH]); for (int i = 0; i < PINNING_HANDLE_INITIAL_LENGTH; i++) { pinning_handles_for_alloc[i] = g_gcGlobalHandleStore->CreateHandleOfType (0, HNDTYPE_PINNED); } enable_preemptive(); ph_index_per_heap = 0; pinning_seg_interval = 2; num_gen0_regions = 0; sip_seg_interval = 2; sip_seg_maxgen_interval = 3; num_condemned_regions = 0; #endif //STRESS_REGIONS end_gen0_region_space = 0; gen0_pinned_free_space = 0; gen0_large_chunk_found = false; // REGIONS PERF TODO: we should really allocate the POH regions together just so that // they wouldn't prevent us from coalescing free regions to form a large virtual address // range. 
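// carve out the initial soh/loh/poh regions for this heap; any failure here fails
// heap initialization (init_gc_heap returns 0).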
if (!initial_make_soh_regions (__this) || !initial_make_uoh_regions (loh_generation, __this) || !initial_make_uoh_regions (poh_generation, __this)) { return 0; } #else //USE_REGIONS heap_segment* seg = make_initial_segment (soh_gen0, h_number, __this); if (!seg) return 0; FIRE_EVENT(GCCreateSegment_V1, heap_segment_mem(seg), (size_t)(heap_segment_reserved (seg) - heap_segment_mem(seg)), gc_etw_segment_small_object_heap); seg_mapping_table_add_segment (seg, __this); #ifdef MULTIPLE_HEAPS assert (heap_segment_heap (seg) == __this); #endif //MULTIPLE_HEAPS uint8_t* start = heap_segment_mem (seg); for (int i = max_generation; i >= 0; i--) { make_generation (i, seg, start); start += Align (min_obj_size); } heap_segment_allocated (seg) = start; alloc_allocated = start; heap_segment_used (seg) = start - plug_skew; ephemeral_heap_segment = seg; // Create segments for the large and pinned generations heap_segment* lseg = make_initial_segment(loh_generation, h_number, __this); if (!lseg) return 0; lseg->flags |= heap_segment_flags_loh; FIRE_EVENT(GCCreateSegment_V1, heap_segment_mem(lseg), (size_t)(heap_segment_reserved (lseg) - heap_segment_mem(lseg)), gc_etw_segment_large_object_heap); heap_segment* pseg = make_initial_segment (poh_generation, h_number, __this); if (!pseg) return 0; pseg->flags |= heap_segment_flags_poh; FIRE_EVENT(GCCreateSegment_V1, heap_segment_mem(pseg), (size_t)(heap_segment_reserved (pseg) - heap_segment_mem(pseg)), gc_etw_segment_pinned_object_heap); seg_mapping_table_add_segment (lseg, __this); seg_mapping_table_add_segment (pseg, __this); make_generation (loh_generation, lseg, heap_segment_mem (lseg)); make_generation (poh_generation, pseg, heap_segment_mem (pseg)); heap_segment_allocated (lseg) = heap_segment_mem (lseg) + Align (min_obj_size, get_alignment_constant (FALSE)); heap_segment_used (lseg) = heap_segment_allocated (lseg) - plug_skew; heap_segment_allocated (pseg) = heap_segment_mem (pseg) + Align (min_obj_size, get_alignment_constant (FALSE)); heap_segment_used (pseg) = heap_segment_allocated (pseg) - plug_skew; for (int gen_num = 0; gen_num < total_generation_count; gen_num++) { generation* gen = generation_of (gen_num); make_unused_array (generation_allocation_start (gen), Align (min_obj_size)); } #ifdef MULTIPLE_HEAPS assert (heap_segment_heap (lseg) == __this); assert (heap_segment_heap (pseg) == __this); #endif //MULTIPLE_HEAPS #endif //USE_REGIONS #ifdef MULTIPLE_HEAPS //initialize the alloc context heap generation_alloc_context (generation_of (soh_gen0))->set_alloc_heap(vm_heap); generation_alloc_context (generation_of (loh_generation))->set_alloc_heap(vm_heap); generation_alloc_context (generation_of (poh_generation))->set_alloc_heap(vm_heap); #endif //MULTIPLE_HEAPS generation_of (max_generation)->free_list_allocator = allocator(NUM_GEN2_ALIST, BASE_GEN2_ALIST_BITS, gen2_alloc_list, max_generation); generation_of (loh_generation)->free_list_allocator = allocator(NUM_LOH_ALIST, BASE_LOH_ALIST_BITS, loh_alloc_list); generation_of (poh_generation)->free_list_allocator = allocator(NUM_POH_ALIST, BASE_POH_ALIST_BITS, poh_alloc_list); for (int oh_index = 0; oh_index < (gc_oh_num::total_oh_count - 1); oh_index++) etw_allocation_running_amount[oh_index] = 0; total_alloc_bytes_soh = 0; total_alloc_bytes_uoh = 0; //needs to be done after the dynamic data has been initialized #ifndef MULTIPLE_HEAPS allocation_running_amount = dd_min_size (dynamic_data_of (0)); #endif //!MULTIPLE_HEAPS fgn_maxgen_percent = 0; fgn_last_alloc = dd_min_size (dynamic_data_of (0)); 
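// allocate the initial mark stack (MARK_STACK_INITIAL_LENGTH entries); when background
// GC can be used a separate background mark stack is allocated further down.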
mark* arr = new (nothrow) (mark [MARK_STACK_INITIAL_LENGTH]); if (!arr) return 0; make_mark_stack(arr); #ifdef BACKGROUND_GC #ifdef BGC_SERVO_TUNING loh_a_no_bgc = 0; loh_a_bgc_marking = 0; loh_a_bgc_planning = 0; bgc_maxgen_end_fl_size = 0; #endif //BGC_SERVO_TUNING freeable_soh_segment = 0; gchist_index_per_heap = 0; if (gc_can_use_concurrent) { uint8_t** b_arr = new (nothrow) (uint8_t * [MARK_STACK_INITIAL_LENGTH]); if (!b_arr) return 0; make_background_mark_stack(b_arr); } #endif //BACKGROUND_GC #ifndef USE_REGIONS ephemeral_low = generation_allocation_start(generation_of(max_generation - 1)); ephemeral_high = heap_segment_reserved(ephemeral_heap_segment); #endif //!USE_REGIONS if (heap_number == 0) { stomp_write_barrier_initialize( #if defined(MULTIPLE_HEAPS) || defined(USE_REGIONS) reinterpret_cast<uint8_t*>(1), reinterpret_cast<uint8_t*>(~0) #else ephemeral_low, ephemeral_high #endif //!MULTIPLE_HEAPS || USE_REGIONS ); } #ifdef MULTIPLE_HEAPS if (!create_gc_thread ()) return 0; g_heaps [heap_number] = this; #endif //MULTIPLE_HEAPS #ifdef FEATURE_PREMORTEM_FINALIZATION HRESULT hr = AllocateCFinalize(&finalize_queue); if (FAILED(hr)) return 0; #endif // FEATURE_PREMORTEM_FINALIZATION max_free_space_items = MAX_NUM_FREE_SPACES; bestfit_seg = new (nothrow) seg_free_spaces (heap_number); if (!bestfit_seg) { return 0; } if (!bestfit_seg->alloc()) { return 0; } last_gc_before_oom = FALSE; sufficient_gen0_space_p = FALSE; #ifdef MULTIPLE_HEAPS #ifdef HEAP_ANALYZE heap_analyze_success = TRUE; internal_root_array = 0; internal_root_array_index = 0; internal_root_array_length = initial_internal_roots; current_obj = 0; current_obj_size = 0; #endif //HEAP_ANALYZE #endif // MULTIPLE_HEAPS #ifdef BACKGROUND_GC bgc_thread_id.Clear(); if (!create_bgc_thread_support()) { return 0; } bgc_alloc_lock = new (nothrow) exclusive_sync; if (!bgc_alloc_lock) { return 0; } bgc_alloc_lock->init(); bgc_thread_running = 0; bgc_thread = 0; bgc_threads_timeout_cs.Initialize(); current_bgc_state = bgc_not_in_process; background_soh_alloc_count = 0; background_uoh_alloc_count = 0; bgc_overflow_count = 0; end_loh_size = dd_min_size (dynamic_data_of (loh_generation)); end_poh_size = dd_min_size (dynamic_data_of (poh_generation)); current_sweep_pos = 0; #ifdef DOUBLY_LINKED_FL current_sweep_seg = 0; #endif //DOUBLY_LINKED_FL #endif //BACKGROUND_GC #ifdef GC_CONFIG_DRIVEN memset(interesting_data_per_heap, 0, sizeof (interesting_data_per_heap)); memset(compact_reasons_per_heap, 0, sizeof (compact_reasons_per_heap)); memset(expand_mechanisms_per_heap, 0, sizeof (expand_mechanisms_per_heap)); memset(interesting_mechanism_bits_per_heap, 0, sizeof (interesting_mechanism_bits_per_heap)); #endif //GC_CONFIG_DRIVEN return 1; } void gc_heap::destroy_semi_shared() { //TODO: will need to move this to per heap //#ifdef BACKGROUND_GC // if (c_mark_list) // delete c_mark_list; //#endif //BACKGROUND_GC if (g_mark_list) delete g_mark_list; if (seg_mapping_table) delete seg_mapping_table; #ifdef FEATURE_BASICFREEZE //destroy the segment map seg_table->delete_sorted_table(); #endif //FEATURE_BASICFREEZE } void gc_heap::self_destroy() { #ifdef BACKGROUND_GC kill_gc_thread(); #endif //BACKGROUND_GC if (gc_done_event.IsValid()) { gc_done_event.CloseEvent(); } // destroy every segment for (int i = get_start_generation_index(); i < total_generation_count; i++) { heap_segment* seg = heap_segment_rw (generation_start_segment (generation_of (i))); PREFIX_ASSUME(seg != NULL); while (seg) { heap_segment* next_seg = heap_segment_next_rw (seg); 
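// next_seg was captured above because the segment can't be touched once
// delete_heap_segment has released it.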
delete_heap_segment (seg); seg = next_seg; } } // get rid of the card table release_card_table (card_table); // destroy the mark stack delete mark_stack_array; #ifdef FEATURE_PREMORTEM_FINALIZATION if (finalize_queue) delete finalize_queue; #endif // FEATURE_PREMORTEM_FINALIZATION } void gc_heap::destroy_gc_heap(gc_heap* heap) { heap->self_destroy(); delete heap; } // Destroys resources owned by gc. It is assumed that a last GC has been performed and that // the finalizer queue has been drained. void gc_heap::shutdown_gc() { destroy_semi_shared(); #ifdef MULTIPLE_HEAPS //delete the heaps array delete g_heaps; destroy_thread_support(); n_heaps = 0; #endif //MULTIPLE_HEAPS //destroy seg_manager destroy_initial_memory(); GCToOSInterface::Shutdown(); } inline BOOL gc_heap::size_fit_p (size_t size REQD_ALIGN_AND_OFFSET_DCL, uint8_t* alloc_pointer, uint8_t* alloc_limit, uint8_t* old_loc, int use_padding) { BOOL already_padded = FALSE; #ifdef SHORT_PLUGS if ((old_loc != 0) && (use_padding & USE_PADDING_FRONT)) { alloc_pointer = alloc_pointer + Align (min_obj_size); already_padded = TRUE; } #endif //SHORT_PLUGS if (!((old_loc == 0) || same_large_alignment_p (old_loc, alloc_pointer))) size = size + switch_alignment_size (already_padded); #ifdef FEATURE_STRUCTALIGN alloc_pointer = StructAlign(alloc_pointer, requiredAlignment, alignmentOffset); #endif // FEATURE_STRUCTALIGN // in allocate_in_condemned_generation we can have this when we // set the alloc_limit to plan_allocated which could be less than // alloc_ptr if (alloc_limit < alloc_pointer) { return FALSE; } if (old_loc != 0) { return (((size_t)(alloc_limit - alloc_pointer) >= (size + ((use_padding & USE_PADDING_TAIL)? Align(min_obj_size) : 0))) #ifdef SHORT_PLUGS ||((!(use_padding & USE_PADDING_FRONT)) && ((alloc_pointer + size) == alloc_limit)) #else //SHORT_PLUGS ||((alloc_pointer + size) == alloc_limit) #endif //SHORT_PLUGS ); } else { assert (size == Align (min_obj_size)); return ((size_t)(alloc_limit - alloc_pointer) >= size); } } inline BOOL gc_heap::a_size_fit_p (size_t size, uint8_t* alloc_pointer, uint8_t* alloc_limit, int align_const) { // We could have run into cases where this is true when alloc_allocated is the // the same as the seg committed. if (alloc_limit < alloc_pointer) { return FALSE; } return ((size_t)(alloc_limit - alloc_pointer) >= (size + Align(min_obj_size, align_const))); } // Grow by committing more pages BOOL gc_heap::grow_heap_segment (heap_segment* seg, uint8_t* high_address, bool* hard_limit_exceeded_p) { assert (high_address <= heap_segment_reserved (seg)); if (hard_limit_exceeded_p) *hard_limit_exceeded_p = false; //return 0 if we are at the end of the segment. 
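// otherwise commit just enough pages to cover high_address: the request is rounded up
// to at least commit_min_th and clamped to the space still reserved in the segment.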
if (align_on_page (high_address) > heap_segment_reserved (seg)) return FALSE; if (high_address <= heap_segment_committed (seg)) return TRUE; size_t c_size = align_on_page ((size_t)(high_address - heap_segment_committed (seg))); c_size = max (c_size, commit_min_th); c_size = min (c_size, (size_t)(heap_segment_reserved (seg) - heap_segment_committed (seg))); if (c_size == 0) return FALSE; STRESS_LOG2(LF_GC, LL_INFO10000, "Growing heap_segment: %Ix high address: %Ix\n", (size_t)seg, (size_t)high_address); bool ret = virtual_commit (heap_segment_committed (seg), c_size, heap_segment_oh (seg), heap_number, hard_limit_exceeded_p); if (ret) { heap_segment_committed (seg) += c_size; STRESS_LOG1(LF_GC, LL_INFO10000, "New commit: %Ix\n", (size_t)heap_segment_committed (seg)); assert (heap_segment_committed (seg) <= heap_segment_reserved (seg)); assert (high_address <= heap_segment_committed (seg)); #if defined(MULTIPLE_HEAPS) && !defined(USE_REGIONS) // we should never increase committed beyond decommit target when gradual // decommit is in progress - if we do, this means commit and decommit are // going on at the same time. assert (!gradual_decommit_in_progress_p || (seg != ephemeral_heap_segment) || (heap_segment_committed (seg) <= heap_segment_decommit_target (seg))); #endif //MULTIPLE_HEAPS && !USE_REGIONS } return !!ret; } inline int gc_heap::grow_heap_segment (heap_segment* seg, uint8_t* allocated, uint8_t* old_loc, size_t size, BOOL pad_front_p REQD_ALIGN_AND_OFFSET_DCL) { BOOL already_padded = FALSE; #ifdef SHORT_PLUGS if ((old_loc != 0) && pad_front_p) { allocated = allocated + Align (min_obj_size); already_padded = TRUE; } #endif //SHORT_PLUGS if (!((old_loc == 0) || same_large_alignment_p (old_loc, allocated))) size += switch_alignment_size (already_padded); #ifdef FEATURE_STRUCTALIGN size_t pad = ComputeStructAlignPad(allocated, requiredAlignment, alignmentOffset); return grow_heap_segment (seg, allocated + pad + size); #else // FEATURE_STRUCTALIGN return grow_heap_segment (seg, allocated + size); #endif // FEATURE_STRUCTALIGN } // thread this object to the front of gen's free list and update stats. void gc_heap::thread_free_item_front (generation* gen, uint8_t* free_start, size_t free_size) { make_unused_array (free_start, free_size); generation_free_list_space (gen) += free_size; generation_allocator(gen)->thread_item_front (free_start, free_size); add_gen_free (gen->gen_num, free_size); if (gen->gen_num == max_generation) { dprintf (2, ("AO h%d: gen2F+: %Ix(%Id)->%Id, FO: %Id", heap_number, free_start, free_size, generation_free_list_space (gen), generation_free_obj_space (gen))); } } #ifdef DOUBLY_LINKED_FL void gc_heap::thread_item_front_added (generation* gen, uint8_t* free_start, size_t free_size) { make_unused_array (free_start, free_size); generation_free_list_space (gen) += free_size; int bucket_index = generation_allocator(gen)->thread_item_front_added (free_start, free_size); if (gen->gen_num == max_generation) { dprintf (2, ("AO [h%d] gen2FL+: %Ix(%Id)->%Id", heap_number, free_start, free_size, generation_free_list_space (gen))); } add_gen_free (gen->gen_num, free_size); } #endif //DOUBLY_LINKED_FL // this is for free objects that are not on the free list; also update stats. 
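// Unlike thread_free_item_front / thread_item_front_added above, which put the space back on the
// generation's free list (generation_free_list_space), this only accounts the space as
// generation_free_obj_space: it becomes a filler object the allocator will not hand out again.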
void gc_heap::make_free_obj (generation* gen, uint8_t* free_start, size_t free_size)
{
    make_unused_array (free_start, free_size);
    generation_free_obj_space (gen) += free_size;
    if (gen->gen_num == max_generation)
    {
        dprintf (2, ("AO [h%d] gen2FO+: %Ix(%Id)->%Id", heap_number, free_start, free_size, generation_free_obj_space (gen)));
    }
}

//used only in older generation allocation (i.e during gc).
void gc_heap::adjust_limit (uint8_t* start, size_t limit_size, generation* gen)
{
    dprintf (3, ("gc Expanding segment allocation"));
    heap_segment* seg = generation_allocation_segment (gen);
    if ((generation_allocation_limit (gen) != start) || (start != heap_segment_plan_allocated (seg)))
    {
        if (generation_allocation_limit (gen) == heap_segment_plan_allocated (seg))
        {
            assert (generation_allocation_pointer (gen) >= heap_segment_mem (seg));
            assert (generation_allocation_pointer (gen) <= heap_segment_committed (seg));
            heap_segment_plan_allocated (generation_allocation_segment (gen)) = generation_allocation_pointer (gen);
        }
        else
        {
            uint8_t* hole = generation_allocation_pointer (gen);
            size_t size = (generation_allocation_limit (gen) - generation_allocation_pointer (gen));

            if (size != 0)
            {
                dprintf (3, ("filling up hole: %Ix, size %Ix", hole, size));
                size_t allocated_size = generation_allocation_pointer (gen) - generation_allocation_context_start_region (gen);
#ifdef DOUBLY_LINKED_FL
                if (gen->gen_num == max_generation)
                {
                    // For BGC since we need to thread the max_gen's free list as a doubly linked list we need to
                    // preserve 5 ptr-sized words: SB | MT | Len | Next | Prev
                    // This means we cannot simply make a filler free object right after what's allocated in this
                    // alloc context if that's < 5-ptr sized.
                    //
                    if (allocated_size <= min_free_item_no_prev)
                    {
                        // We can't make the free object just yet. Need to record the size.
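                        // A doubly linked gen2 free-list item needs all of SB | MT | Len | Next | Prev,
                        // so while the plug allocated in this context is still shorter than
                        // min_free_item_no_prev we only stash the filler's size at a fixed offset from
                        // the context start and mark the spot (set_free_obj_in_compact_bit below);
                        // the actual free object is constructed later from that recorded state.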
size_t* filler_free_obj_size_location = (size_t*)(generation_allocation_context_start_region (gen) + min_free_item_no_prev); size_t filler_free_obj_size = 0; if (size >= (Align (min_free_list) + Align (min_obj_size))) { filler_free_obj_size = Align (min_obj_size); size_t fl_size = size - filler_free_obj_size; thread_item_front_added (gen, (hole + filler_free_obj_size), fl_size); } else { filler_free_obj_size = size; } generation_free_obj_space (gen) += filler_free_obj_size; *filler_free_obj_size_location = filler_free_obj_size; uint8_t* old_loc = generation_last_free_list_allocated (gen); // check if old_loc happens to be in a saved plug_and_gap with a pinned plug after it uint8_t* saved_plug_and_gap = nullptr; if (saved_pinned_plug_index != INVALID_SAVED_PINNED_PLUG_INDEX) { saved_plug_and_gap = pinned_plug (pinned_plug_of (saved_pinned_plug_index)) - sizeof(plug_and_gap); dprintf (3333, ("[h%d] sppi: %Id mtos: %Id old_loc: %Ix pp: %Ix(%Id) offs: %Id", heap_number, saved_pinned_plug_index, mark_stack_tos, old_loc, pinned_plug (pinned_plug_of (saved_pinned_plug_index)), pinned_len (pinned_plug_of (saved_pinned_plug_index)), old_loc - saved_plug_and_gap)); } size_t offset = old_loc - saved_plug_and_gap; if (offset < sizeof(gap_reloc_pair)) { // the object at old_loc must be at least min_obj_size assert (offset <= sizeof(plug_and_gap) - min_obj_size); // if so, set the bit in the saved info instead set_free_obj_in_compact_bit ((uint8_t*)(&pinned_plug_of (saved_pinned_plug_index)->saved_pre_plug_reloc) + offset); } else { #ifdef _DEBUG // check this looks like an object header(old_loc)->Validate(); #endif //_DEBUG set_free_obj_in_compact_bit (old_loc); } dprintf (3333, ("[h%d] ac: %Ix->%Ix((%Id < %Id), Pset %Ix s->%Id", heap_number, generation_allocation_context_start_region (gen), generation_allocation_pointer (gen), allocated_size, min_free_item_no_prev, filler_free_obj_size_location, filler_free_obj_size)); } else { if (size >= Align (min_free_list)) { thread_item_front_added (gen, hole, size); } else { make_free_obj (gen, hole, size); } } } else #endif //DOUBLY_LINKED_FL { // TODO: this should be written the same way as the above, ie, it should check // allocated_size first, but it doesn't need to do MAKE_FREE_OBJ_IN_COMPACT // related things. 
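                // Roughly, the fallback below is:
                //   if (size >= Align (min_free_list))
                //       put the hole back on the free list, splitting off a min-size filler first
                //       when the preceding allocation is too short to stand on its own;
                //   else
                //       leave it as untracked free-object space via make_free_obj.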
if (size >= Align (min_free_list)) { if (allocated_size < min_free_item_no_prev) { if (size >= (Align (min_free_list) + Align (min_obj_size))) { //split hole into min obj + threadable free item make_free_obj (gen, hole, min_obj_size); thread_free_item_front (gen, (hole + Align (min_obj_size)), (size - Align (min_obj_size))); } else { dprintf (3, ("allocated size too small, can't put back rest on free list %Ix", allocated_size)); make_free_obj (gen, hole, size); } } else { dprintf (3, ("threading hole in front of free list")); thread_free_item_front (gen, hole, size); } } else { make_free_obj (gen, hole, size); } } } } generation_allocation_pointer (gen) = start; generation_allocation_context_start_region (gen) = start; } generation_allocation_limit (gen) = (start + limit_size); } void verify_mem_cleared (uint8_t* start, size_t size) { if (!Aligned (size)) { FATAL_GC_ERROR(); } PTR_PTR curr_ptr = (PTR_PTR) start; for (size_t i = 0; i < size / sizeof(PTR_PTR); i++) { if (*(curr_ptr++) != 0) { FATAL_GC_ERROR(); } } } #if defined (VERIFY_HEAP) && defined (BACKGROUND_GC) void gc_heap::set_batch_mark_array_bits (uint8_t* start, uint8_t* end) { size_t start_mark_bit = mark_bit_of (start); size_t end_mark_bit = mark_bit_of (end); unsigned int startbit = mark_bit_bit (start_mark_bit); unsigned int endbit = mark_bit_bit (end_mark_bit); size_t startwrd = mark_bit_word (start_mark_bit); size_t endwrd = mark_bit_word (end_mark_bit); dprintf (3, ("Setting all mark array bits between [%Ix:%Ix-[%Ix:%Ix", (size_t)start, (size_t)start_mark_bit, (size_t)end, (size_t)end_mark_bit)); unsigned int firstwrd = ~(lowbits (~0, startbit)); unsigned int lastwrd = ~(highbits (~0, endbit)); if (startwrd == endwrd) { unsigned int wrd = firstwrd & lastwrd; mark_array[startwrd] |= wrd; return; } // set the first mark word. if (startbit) { mark_array[startwrd] |= firstwrd; startwrd++; } for (size_t wrdtmp = startwrd; wrdtmp < endwrd; wrdtmp++) { mark_array[wrdtmp] = ~(unsigned int)0; } // set the last mark word. if (endbit) { mark_array[endwrd] |= lastwrd; } } // makes sure that the mark array bits between start and end are 0. void gc_heap::check_batch_mark_array_bits (uint8_t* start, uint8_t* end) { size_t start_mark_bit = mark_bit_of (start); size_t end_mark_bit = mark_bit_of (end); unsigned int startbit = mark_bit_bit (start_mark_bit); unsigned int endbit = mark_bit_bit (end_mark_bit); size_t startwrd = mark_bit_word (start_mark_bit); size_t endwrd = mark_bit_word (end_mark_bit); //dprintf (3, ("Setting all mark array bits between [%Ix:%Ix-[%Ix:%Ix", // (size_t)start, (size_t)start_mark_bit, // (size_t)end, (size_t)end_mark_bit)); unsigned int firstwrd = ~(lowbits (~0, startbit)); unsigned int lastwrd = ~(highbits (~0, endbit)); if (startwrd == endwrd) { unsigned int wrd = firstwrd & lastwrd; if (mark_array[startwrd] & wrd) { dprintf (1, ("The %Ix portion of mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared", wrd, startwrd, mark_array [startwrd], mark_word_address (startwrd))); FATAL_GC_ERROR(); } return; } // set the first mark word. 
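    // Despite the "set" wording copied from set_batch_mark_array_bits above, this routine only
    // verifies that the bits are still clear. firstwrd masks the bits from startbit up in the
    // first mark word and lastwrd masks the bits below endbit in the last one; when the whole
    // range falls inside a single word the two masks are simply intersected (handled just above).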
if (startbit) { if (mark_array[startwrd] & firstwrd) { dprintf (1, ("The %Ix portion of mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared", firstwrd, startwrd, mark_array [startwrd], mark_word_address (startwrd))); FATAL_GC_ERROR(); } startwrd++; } for (size_t wrdtmp = startwrd; wrdtmp < endwrd; wrdtmp++) { if (mark_array[wrdtmp]) { dprintf (1, ("The mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared", wrdtmp, mark_array [wrdtmp], mark_word_address (wrdtmp))); FATAL_GC_ERROR(); } } // set the last mark word. if (endbit) { if (mark_array[endwrd] & lastwrd) { dprintf (1, ("The %Ix portion of mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared", lastwrd, lastwrd, mark_array [lastwrd], mark_word_address (lastwrd))); FATAL_GC_ERROR(); } } } #endif //VERIFY_HEAP && BACKGROUND_GC allocator::allocator (unsigned int num_b, int fbb, alloc_list* b, int gen) { assert (num_b < MAX_BUCKET_COUNT); num_buckets = num_b; first_bucket_bits = fbb; buckets = b; gen_number = gen; } alloc_list& allocator::alloc_list_of (unsigned int bn) { assert (bn < num_buckets); if (bn == 0) return first_bucket; else return buckets [bn-1]; } size_t& allocator::alloc_list_damage_count_of (unsigned int bn) { assert (bn < num_buckets); if (bn == 0) return first_bucket.alloc_list_damage_count(); else return buckets [bn-1].alloc_list_damage_count(); } void allocator::unlink_item (unsigned int bn, uint8_t* item, uint8_t* prev_item, BOOL use_undo_p) { alloc_list* al = &alloc_list_of (bn); uint8_t* next_item = free_list_slot(item); #ifdef DOUBLY_LINKED_FL // if repair_list is TRUE yet use_undo_p is FALSE, it means we do need to make sure // this item does not look like it's on the free list as we will not have a chance to // do that later. BOOL repair_list = !discard_if_no_fit_p (); #endif //DOUBLY_LINKED_FL if (prev_item) { if (use_undo_p && (free_list_undo (prev_item) == UNDO_EMPTY)) { assert (item == free_list_slot (prev_item)); free_list_undo (prev_item) = item; alloc_list_damage_count_of (bn)++; } free_list_slot (prev_item) = next_item; } else { al->alloc_list_head() = next_item; } if (al->alloc_list_tail() == item) { al->alloc_list_tail() = prev_item; } #ifdef DOUBLY_LINKED_FL if (repair_list) { if (!use_undo_p) { free_list_prev (item) = PREV_EMPTY; } } if (gen_number == max_generation) { dprintf (3, ("[g%2d, b%2d]UL: %Ix->%Ix->%Ix (h: %Ix, t: %Ix)", gen_number, bn, free_list_prev (item), item, free_list_slot (item), al->alloc_list_head(), al->alloc_list_tail())); dprintf (3, ("[g%2d, b%2d]UL: exit, h->N: %Ix, h->P: %Ix, t->N: %Ix, t->P: %Ix", gen_number, bn, (al->alloc_list_head() ? free_list_slot (al->alloc_list_head()) : 0), (al->alloc_list_head() ? free_list_prev (al->alloc_list_head()) : 0), (al->alloc_list_tail() ? free_list_slot (al->alloc_list_tail()) : 0), (al->alloc_list_tail() ? 
free_list_prev (al->alloc_list_tail()) : 0))); } #endif //DOUBLY_LINKED_FL if (al->alloc_list_head() == 0) { assert (al->alloc_list_tail() == 0); } } #ifdef DOUBLY_LINKED_FL void allocator::unlink_item_no_undo (unsigned int bn, uint8_t* item, size_t size) { alloc_list* al = &alloc_list_of (bn); uint8_t* next_item = free_list_slot (item); uint8_t* prev_item = free_list_prev (item); #ifdef FL_VERIFICATION { uint8_t* start = al->alloc_list_head(); BOOL found_p = FALSE; while (start) { if (start == item) { found_p = TRUE; break; } start = free_list_slot (start); } if (!found_p) { dprintf (1, ("could not find %Ix in b%d!!!", item, a_l_number)); FATAL_GC_ERROR(); } } #endif //FL_VERIFICATION if (prev_item) { free_list_slot (prev_item) = next_item; } else { al->alloc_list_head() = next_item; } if (next_item) { free_list_prev (next_item) = prev_item; } if (al->alloc_list_tail() == item) { al->alloc_list_tail() = prev_item; } free_list_prev (item) = PREV_EMPTY; if (gen_number == max_generation) { dprintf (3, ("[g%2d, b%2d]ULN: %Ix->%Ix->%Ix (h: %Ix, t: %Ix)", gen_number, bn, free_list_prev (item), item, free_list_slot (item), al->alloc_list_head(), al->alloc_list_tail())); dprintf (3, ("[g%2d, b%2d]ULN: exit: h->N: %Ix, h->P: %Ix, t->N: %Ix, t->P: %Ix", gen_number, bn, (al->alloc_list_head() ? free_list_slot (al->alloc_list_head()) : 0), (al->alloc_list_head() ? free_list_prev (al->alloc_list_head()) : 0), (al->alloc_list_tail() ? free_list_slot (al->alloc_list_tail()) : 0), (al->alloc_list_tail() ? free_list_prev (al->alloc_list_tail()) : 0))); } } void allocator::unlink_item_no_undo (uint8_t* item, size_t size) { unsigned int bn = first_suitable_bucket (size); unlink_item_no_undo (bn, item, size); } void allocator::unlink_item_no_undo_added (unsigned int bn, uint8_t* item, uint8_t* previous_item) { alloc_list* al = &alloc_list_of (bn); uint8_t* next_item = free_list_slot (item); uint8_t* prev_item = free_list_prev (item); assert (prev_item == previous_item); if (prev_item) { free_list_slot (prev_item) = next_item; } else { al->added_alloc_list_head() = next_item; } if (next_item) { free_list_prev (next_item) = prev_item; } if (al->added_alloc_list_tail() == item) { al->added_alloc_list_tail() = prev_item; } free_list_prev (item) = PREV_EMPTY; if (gen_number == max_generation) { dprintf (3333, ("[g%2d, b%2d]ULNA: %Ix->%Ix->%Ix (h: %Ix, t: %Ix)", gen_number, bn, free_list_prev (item), item, free_list_slot (item), al->added_alloc_list_head(), al->added_alloc_list_tail())); dprintf (3333, ("[g%2d, b%2d]ULNA: exit: h->N: %Ix, h->P: %Ix, t->N: %Ix, t->P: %Ix", gen_number, bn, (al->added_alloc_list_head() ? free_list_slot (al->added_alloc_list_head()) : 0), (al->added_alloc_list_head() ? free_list_prev (al->added_alloc_list_head()) : 0), (al->added_alloc_list_tail() ? free_list_slot (al->added_alloc_list_tail()) : 0), (al->added_alloc_list_tail() ? free_list_prev (al->added_alloc_list_tail()) : 0))); } } int allocator::thread_item_front_added (uint8_t* item, size_t size) { unsigned int a_l_number = first_suitable_bucket (size); alloc_list* al = &alloc_list_of (a_l_number); free_list_slot (item) = al->added_alloc_list_head(); free_list_prev (item) = 0; // this list's UNDO is not useful. 
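    // Items threaded here during the plan phase go onto the separate "added" list
    // (added_alloc_list_head/tail) rather than the main one; commit_alloc_list_changes
    // later splices them back in front of the regular bucket.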
free_list_undo (item) = UNDO_EMPTY; if (al->added_alloc_list_head() != 0) { free_list_prev (al->added_alloc_list_head()) = item; } al->added_alloc_list_head() = item; if (al->added_alloc_list_tail() == 0) { al->added_alloc_list_tail() = item; } if (gen_number == max_generation) { dprintf (3333, ("[g%2d, b%2d]TFFA: exit: %Ix->%Ix->%Ix (h: %Ix, t: %Ix)", gen_number, a_l_number, free_list_prev (item), item, free_list_slot (item), al->added_alloc_list_head(), al->added_alloc_list_tail())); dprintf (3333, ("[g%2d, b%2d]TFFA: h->N: %Ix, h->P: %Ix, t->N: %Ix, t->P: %Ix", gen_number, a_l_number, (al->added_alloc_list_head() ? free_list_slot (al->added_alloc_list_head()) : 0), (al->added_alloc_list_head() ? free_list_prev (al->added_alloc_list_head()) : 0), (al->added_alloc_list_tail() ? free_list_slot (al->added_alloc_list_tail()) : 0), (al->added_alloc_list_tail() ? free_list_prev (al->added_alloc_list_tail()) : 0))); } return a_l_number; } #endif //DOUBLY_LINKED_FL void allocator::clear() { for (unsigned int i = 0; i < num_buckets; i++) { alloc_list_head_of (i) = 0; alloc_list_tail_of (i) = 0; } } //always thread to the end. void allocator::thread_item (uint8_t* item, size_t size) { unsigned int a_l_number = first_suitable_bucket (size); alloc_list* al = &alloc_list_of (a_l_number); uint8_t*& head = al->alloc_list_head(); uint8_t*& tail = al->alloc_list_tail(); if (al->alloc_list_head() == 0) { assert (al->alloc_list_tail() == 0); } free_list_slot (item) = 0; free_list_undo (item) = UNDO_EMPTY; assert (item != head); #ifdef DOUBLY_LINKED_FL if (gen_number == max_generation) { free_list_prev (item) = tail; } #endif //DOUBLY_LINKED_FL if (head == 0) { head = item; } else { assert ((free_list_slot(head) != 0) || (tail == head)); assert (item != tail); assert (free_list_slot(tail) == 0); free_list_slot (tail) = item; } tail = item; #ifdef DOUBLY_LINKED_FL if (gen_number == max_generation) { dprintf (3333, ("[g%2d, b%2d]TFE: %Ix->%Ix->%Ix (h: %Ix, t: %Ix)", gen_number, a_l_number, free_list_prev (item), item, free_list_slot (item), al->alloc_list_head(), al->alloc_list_tail())); dprintf (3333, ("[g%2d, b%2d]TFE: exit: h->N: %Ix, h->P: %Ix, t->N: %Ix, t->P: %Ix", gen_number, a_l_number, (al->alloc_list_head() ? free_list_slot (al->alloc_list_head()) : 0), (al->alloc_list_head() ? free_list_prev (al->alloc_list_head()) : 0), (al->alloc_list_tail() ? free_list_slot (al->alloc_list_tail()) : 0), (al->alloc_list_tail() ? 
free_list_prev (al->alloc_list_tail()) : 0))); } #endif //DOUBLY_LINKED_FL } void allocator::thread_item_front (uint8_t* item, size_t size) { unsigned int a_l_number = first_suitable_bucket (size); alloc_list* al = &alloc_list_of (a_l_number); if (al->alloc_list_head() == 0) { assert (al->alloc_list_tail() == 0); } free_list_slot (item) = al->alloc_list_head(); free_list_undo (item) = UNDO_EMPTY; if (al->alloc_list_tail() == 0) { assert (al->alloc_list_head() == 0); al->alloc_list_tail() = al->alloc_list_head(); } #ifdef DOUBLY_LINKED_FL if (gen_number == max_generation) { if (al->alloc_list_head() != 0) { free_list_prev (al->alloc_list_head()) = item; } } #endif //DOUBLY_LINKED_FL al->alloc_list_head() = item; if (al->alloc_list_tail() == 0) { al->alloc_list_tail() = item; } #ifdef DOUBLY_LINKED_FL if (gen_number == max_generation) { free_list_prev (item) = 0; dprintf (3333, ("[g%2d, b%2d]TFF: exit: %Ix->%Ix->%Ix (h: %Ix, t: %Ix)", gen_number, a_l_number, free_list_prev (item), item, free_list_slot (item), al->alloc_list_head(), al->alloc_list_tail())); dprintf (3333, ("[g%2d, b%2d]TFF: h->N: %Ix, h->P: %Ix, t->N: %Ix, t->P: %Ix", gen_number, a_l_number, (al->alloc_list_head() ? free_list_slot (al->alloc_list_head()) : 0), (al->alloc_list_head() ? free_list_prev (al->alloc_list_head()) : 0), (al->alloc_list_tail() ? free_list_slot (al->alloc_list_tail()) : 0), (al->alloc_list_tail() ? free_list_prev (al->alloc_list_tail()) : 0))); } #endif //DOUBLY_LINKED_FL } void allocator::copy_to_alloc_list (alloc_list* toalist) { for (unsigned int i = 0; i < num_buckets; i++) { toalist [i] = alloc_list_of (i); #ifdef FL_VERIFICATION size_t damage_count = alloc_list_damage_count_of (i); // We are only calling this method to copy to an empty list // so damage count is always 0 assert (damage_count == 0); uint8_t* free_item = alloc_list_head_of (i); size_t count = 0; while (free_item) { count++; free_item = free_list_slot (free_item); } toalist[i].item_count = count; #endif //FL_VERIFICATION } } void allocator::copy_from_alloc_list (alloc_list* fromalist) { BOOL repair_list = !discard_if_no_fit_p (); #ifdef DOUBLY_LINKED_FL BOOL bgc_repair_p = FALSE; if (gen_number == max_generation) { bgc_repair_p = TRUE; if (alloc_list_damage_count_of (0) != 0) { GCToOSInterface::DebugBreak(); } uint8_t* b0_head = alloc_list_head_of (0); if (b0_head) { free_list_prev (b0_head) = 0; } added_alloc_list_head_of (0) = 0; added_alloc_list_tail_of (0) = 0; } unsigned int start_index = (bgc_repair_p ? 1 : 0); #else unsigned int start_index = 0; #endif //DOUBLY_LINKED_FL for (unsigned int i = start_index; i < num_buckets; i++) { size_t count = alloc_list_damage_count_of (i); alloc_list_of (i) = fromalist [i]; assert (alloc_list_damage_count_of (i) == 0); if (repair_list) { //repair the the list //new items may have been added during the plan phase //items may have been unlinked. 
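            // When unlink_item removed an entry with use_undo_p, it saved the predecessor's original
            // next pointer in free_list_undo and bumped alloc_list_damage_count_of (i); the walk below
            // copies that undo value back into free_list_slot and stops once the damage count is repaid.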
uint8_t* free_item = alloc_list_head_of (i); while (free_item && count) { assert (((CObjectHeader*)free_item)->IsFree()); if ((free_list_undo (free_item) != UNDO_EMPTY)) { count--; free_list_slot (free_item) = free_list_undo (free_item); free_list_undo (free_item) = UNDO_EMPTY; } free_item = free_list_slot (free_item); } #ifdef DOUBLY_LINKED_FL if (bgc_repair_p) { added_alloc_list_head_of (i) = 0; added_alloc_list_tail_of (i) = 0; } #endif //DOUBLY_LINKED_FL #ifdef FL_VERIFICATION free_item = alloc_list_head_of (i); size_t item_count = 0; while (free_item) { item_count++; free_item = free_list_slot (free_item); } assert (item_count == alloc_list_of (i).item_count); #endif //FL_VERIFICATION } #ifdef DEBUG uint8_t* tail_item = alloc_list_tail_of (i); assert ((tail_item == 0) || (free_list_slot (tail_item) == 0)); #endif } } void allocator::commit_alloc_list_changes() { BOOL repair_list = !discard_if_no_fit_p (); #ifdef DOUBLY_LINKED_FL BOOL bgc_repair_p = FALSE; if (gen_number == max_generation) { bgc_repair_p = TRUE; } #endif //DOUBLY_LINKED_FL if (repair_list) { for (unsigned int i = 0; i < num_buckets; i++) { //remove the undo info from list. uint8_t* free_item = alloc_list_head_of (i); #ifdef DOUBLY_LINKED_FL if (bgc_repair_p) { dprintf (3, ("C[b%2d] ENTRY: h: %Ix t: %Ix", i, alloc_list_head_of (i), alloc_list_tail_of (i))); } if (free_item && bgc_repair_p) { if (free_list_prev (free_item) != 0) free_list_prev (free_item) = 0; } #endif //DOUBLY_LINKED_FL size_t count = alloc_list_damage_count_of (i); while (free_item && count) { assert (((CObjectHeader*)free_item)->IsFree()); if (free_list_undo (free_item) != UNDO_EMPTY) { free_list_undo (free_item) = UNDO_EMPTY; #ifdef DOUBLY_LINKED_FL if (bgc_repair_p) { uint8_t* next_item = free_list_slot (free_item); if (next_item && (free_list_prev (next_item) != free_item)) free_list_prev (next_item) = free_item; } #endif //DOUBLY_LINKED_FL count--; } free_item = free_list_slot (free_item); } alloc_list_damage_count_of (i) = 0; #ifdef DOUBLY_LINKED_FL if (bgc_repair_p) { uint8_t* head = alloc_list_head_of (i); uint8_t* tail_added = added_alloc_list_tail_of (i); if (tail_added) { assert (free_list_slot (tail_added) == 0); if (head) { free_list_slot (tail_added) = head; free_list_prev (head) = tail_added; } } uint8_t* head_added = added_alloc_list_head_of (i); if (head_added) { alloc_list_head_of (i) = head_added; uint8_t* final_head = alloc_list_head_of (i); if (alloc_list_tail_of (i) == 0) { alloc_list_tail_of (i) = tail_added; } } added_alloc_list_head_of (i) = 0; added_alloc_list_tail_of (i) = 0; } #endif //DOUBLY_LINKED_FL } } } #ifdef USE_REGIONS void allocator::thread_sip_fl (heap_segment* region) { uint8_t* region_fl_head = region->free_list_head; uint8_t* region_fl_tail = region->free_list_tail; if (!region_fl_head) { assert (!region_fl_tail); assert (region->free_list_size == 0); return; } if (num_buckets == 1) { dprintf (REGIONS_LOG, ("threading gen%d region %Ix onto gen%d FL", heap_segment_gen_num (region), heap_segment_mem (region), gen_number)); alloc_list* al = &alloc_list_of (0); uint8_t*& head = al->alloc_list_head(); uint8_t*& tail = al->alloc_list_tail(); if (tail == 0) { assert (head == 0); head = region_fl_head; } else { free_list_slot (tail) = region_fl_head; } tail = region_fl_tail; } else { dprintf (REGIONS_LOG, ("threading gen%d region %Ix onto gen%d bucketed FL", heap_segment_gen_num (region), heap_segment_mem (region), gen_number)); // If we have a bucketed free list we'd need to go through the region's free list. 
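        // Each item is re-bucketed by its own size via thread_item (which uses first_suitable_bucket),
        // and the running total is checked against the region's recorded free_list_size at the end.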
uint8_t* region_fl_item = region_fl_head; size_t total_free_size = 0; while (region_fl_item) { uint8_t* next_fl_item = free_list_slot (region_fl_item); size_t size_item = size (region_fl_item); thread_item (region_fl_item, size_item); total_free_size += size_item; region_fl_item = next_fl_item; } assert (total_free_size == region->free_list_size); } } #endif //USE_REGIONS #ifdef FEATURE_EVENT_TRACE uint16_t allocator::count_largest_items (etw_bucket_info* bucket_info, size_t max_size, size_t max_item_count, size_t* recorded_fl_info_size) { assert (gen_number == max_generation); size_t size_counted_total = 0; size_t items_counted_total = 0; uint16_t bucket_info_index = 0; for (int i = (num_buckets - 1); i >= 0; i--) { uint32_t items_counted = 0; size_t size_counted = 0; uint8_t* free_item = alloc_list_head_of ((unsigned int)i); while (free_item) { assert (((CObjectHeader*)free_item)->IsFree()); size_t free_item_size = Align (size (free_item)); size_counted_total += free_item_size; size_counted += free_item_size; items_counted_total++; items_counted++; if ((size_counted_total > max_size) || (items_counted > max_item_count)) { bucket_info[bucket_info_index++].set ((uint16_t)i, items_counted, size_counted); *recorded_fl_info_size = size_counted_total; return bucket_info_index; } free_item = free_list_slot (free_item); } if (items_counted) { bucket_info[bucket_info_index++].set ((uint16_t)i, items_counted, size_counted); } } *recorded_fl_info_size = size_counted_total; return bucket_info_index; } #endif //FEATURE_EVENT_TRACE void gc_heap::adjust_limit_clr (uint8_t* start, size_t limit_size, size_t size, alloc_context* acontext, uint32_t flags, heap_segment* seg, int align_const, int gen_number) { bool uoh_p = (gen_number > 0); GCSpinLock* msl = uoh_p ? &more_space_lock_uoh : &more_space_lock_soh; uint64_t& total_alloc_bytes = uoh_p ? 
total_alloc_bytes_uoh : total_alloc_bytes_soh; size_t aligned_min_obj_size = Align(min_obj_size, align_const); if (seg) { assert (heap_segment_used (seg) <= heap_segment_committed (seg)); } #ifdef MULTIPLE_HEAPS if (gen_number == 0) { if (!gen0_allocated_after_gc_p) { gen0_allocated_after_gc_p = true; } } #endif //MULTIPLE_HEAPS dprintf (3, ("Expanding segment allocation [%Ix, %Ix[", (size_t)start, (size_t)start + limit_size - aligned_min_obj_size)); if ((acontext->alloc_limit != start) && (acontext->alloc_limit + aligned_min_obj_size)!= start) { uint8_t* hole = acontext->alloc_ptr; if (hole != 0) { size_t ac_size = (acontext->alloc_limit - acontext->alloc_ptr); dprintf (3, ("filling up hole [%Ix, %Ix[", (size_t)hole, (size_t)hole + ac_size + aligned_min_obj_size)); // when we are finishing an allocation from a free list // we know that the free area was Align(min_obj_size) larger acontext->alloc_bytes -= ac_size; total_alloc_bytes -= ac_size; size_t free_obj_size = ac_size + aligned_min_obj_size; make_unused_array (hole, free_obj_size); generation_free_obj_space (generation_of (gen_number)) += free_obj_size; } acontext->alloc_ptr = start; } else { if (gen_number == 0) { #ifdef USE_REGIONS if (acontext->alloc_ptr == 0) { acontext->alloc_ptr = start; } else #endif //USE_REGIONS { size_t pad_size = aligned_min_obj_size; dprintf (3, ("contigous ac: making min obj gap %Ix->%Ix(%Id)", acontext->alloc_ptr, (acontext->alloc_ptr + pad_size), pad_size)); make_unused_array (acontext->alloc_ptr, pad_size); acontext->alloc_ptr += pad_size; } } } acontext->alloc_limit = (start + limit_size - aligned_min_obj_size); size_t added_bytes = limit_size - ((gen_number <= max_generation) ? aligned_min_obj_size : 0); acontext->alloc_bytes += added_bytes; total_alloc_bytes += added_bytes; size_t etw_allocation_amount = 0; bool fire_event_p = update_alloc_info (gen_number, added_bytes, &etw_allocation_amount); uint8_t* saved_used = 0; if (seg) { saved_used = heap_segment_used (seg); } if (seg == ephemeral_heap_segment) { //Sometimes the allocated size is advanced without clearing the //memory. Let's catch up here if (heap_segment_used (seg) < (alloc_allocated - plug_skew)) { heap_segment_used (seg) = alloc_allocated - plug_skew; assert (heap_segment_mem (seg) <= heap_segment_used (seg)); assert (heap_segment_used (seg) <= heap_segment_reserved (seg)); } } #ifdef BACKGROUND_GC else if (seg) { uint8_t* old_allocated = heap_segment_allocated (seg) - plug_skew - limit_size; #ifdef FEATURE_LOH_COMPACTION if (gen_number == loh_generation) { old_allocated -= Align (loh_padding_obj_size, align_const); } #endif //FEATURE_LOH_COMPACTION assert (heap_segment_used (seg) >= old_allocated); } #endif //BACKGROUND_GC // we are going to clear a right-edge exclusive span [clear_start, clear_limit) // but will adjust for cases when object is ok to stay dirty or the space has not seen any use yet // NB: the size and limit_size include syncblock, which is to the -1 of the object start // that effectively shifts the allocation by `plug_skew` uint8_t* clear_start = start - plug_skew; uint8_t* clear_limit = start + limit_size - plug_skew; if (flags & GC_ALLOC_ZEROING_OPTIONAL) { uint8_t* obj_start = acontext->alloc_ptr; assert(start >= obj_start); uint8_t* obj_end = obj_start + size - plug_skew; assert(obj_end >= clear_start); // if clearing at the object start, clear the syncblock. 
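        // With GC_ALLOC_ZEROING_OPTIONAL the object being handed out is not cleared at all:
        // clear_start is advanced past its end below, and only the syncblock slot is zeroed
        // when the object happens to sit at the very start of the span.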
if(obj_start == start) { *(PTR_PTR)clear_start = 0; } // skip the rest of the object dprintf(3, ("zeroing optional: skipping object at %Ix->%Ix(%Id)", clear_start, obj_end, obj_end - clear_start)); clear_start = obj_end; } // fetch the ephemeral_heap_segment *before* we release the msl // - ephemeral_heap_segment may change due to other threads allocating heap_segment* gen0_segment = ephemeral_heap_segment; // check if space to clear is all dirty from prior use or only partially if ((seg == 0) || (clear_limit <= heap_segment_used (seg))) { add_saved_spinlock_info (uoh_p, me_release, mt_clr_mem); leave_spin_lock (msl); if (clear_start < clear_limit) { dprintf(3, ("clearing memory at %Ix for %d bytes", clear_start, clear_limit - clear_start)); memclr(clear_start, clear_limit - clear_start); } } else { // we only need to clear [clear_start, used) and only if clear_start < used uint8_t* used = heap_segment_used (seg); heap_segment_used (seg) = clear_limit; add_saved_spinlock_info (uoh_p, me_release, mt_clr_mem); leave_spin_lock (msl); if (clear_start < used) { if (used != saved_used) { FATAL_GC_ERROR(); } dprintf (2, ("clearing memory before used at %Ix for %Id bytes", clear_start, used - clear_start)); memclr (clear_start, used - clear_start); } } #ifdef FEATURE_EVENT_TRACE if (fire_event_p) { fire_etw_allocation_event (etw_allocation_amount, gen_number, acontext->alloc_ptr, size); } #endif //FEATURE_EVENT_TRACE //this portion can be done after we release the lock if (seg == gen0_segment || ((seg == nullptr) && (gen_number == 0) && (limit_size >= CLR_SIZE / 2))) { if (gen0_must_clear_bricks > 0) { //set the brick table to speed up find_object size_t b = brick_of (acontext->alloc_ptr); set_brick (b, acontext->alloc_ptr - brick_address (b)); b++; dprintf (3, ("Allocation Clearing bricks [%Ix, %Ix[", b, brick_of (align_on_brick (start + limit_size)))); volatile short* x = &brick_table [b]; short* end_x = &brick_table [brick_of (align_on_brick (start + limit_size))]; for (;x < end_x;x++) *x = -1; } else { gen0_bricks_cleared = FALSE; } } // verifying the memory is completely cleared. //if (!(flags & GC_ALLOC_ZEROING_OPTIONAL)) //{ // verify_mem_cleared(start - plug_skew, limit_size); //} } size_t gc_heap::new_allocation_limit (size_t size, size_t physical_limit, int gen_number) { dynamic_data* dd = dynamic_data_of (gen_number); ptrdiff_t new_alloc = dd_new_allocation (dd); assert (new_alloc == (ptrdiff_t)Align (new_alloc, get_alignment_constant (gen_number < uoh_start_generation))); ptrdiff_t logical_limit = max (new_alloc, (ptrdiff_t)size); size_t limit = min (logical_limit, (ptrdiff_t)physical_limit); assert (limit == Align (limit, get_alignment_constant (gen_number <= max_generation))); return limit; } size_t gc_heap::limit_from_size (size_t size, uint32_t flags, size_t physical_limit, int gen_number, int align_const) { size_t padded_size = size + Align (min_obj_size, align_const); // for LOH this is not true...we could select a physical_limit that's exactly the same // as size. assert ((gen_number != 0) || (physical_limit >= padded_size)); // For SOH if the size asked for is very small, we want to allocate more than just what's asked for if possible. // Unless we were told not to clean, then we will not force it. size_t min_size_to_allocate = ((gen_number == 0 && !(flags & GC_ALLOC_ZEROING_OPTIONAL)) ? 
allocation_quantum : 0); size_t desired_size_to_allocate = max (padded_size, min_size_to_allocate); size_t new_physical_limit = min (physical_limit, desired_size_to_allocate); size_t new_limit = new_allocation_limit (padded_size, new_physical_limit, gen_number); assert (new_limit >= (size + Align (min_obj_size, align_const))); dprintf (3, ("h%d requested to allocate %Id bytes, actual size is %Id, phy limit: %Id", heap_number, size, new_limit, physical_limit)); return new_limit; } void gc_heap::add_to_oom_history_per_heap() { oom_history* current_hist = &oomhist_per_heap[oomhist_index_per_heap]; memcpy (current_hist, &oom_info, sizeof (oom_info)); oomhist_index_per_heap++; if (oomhist_index_per_heap == max_oom_history_count) { oomhist_index_per_heap = 0; } } void gc_heap::handle_oom (oom_reason reason, size_t alloc_size, uint8_t* allocated, uint8_t* reserved) { if (reason == oom_budget) { alloc_size = dd_min_size (dynamic_data_of (0)) / 2; } if ((reason == oom_budget) && ((!fgm_result.loh_p) && (fgm_result.fgm != fgm_no_failure))) { // This means during the last GC we needed to reserve and/or commit more memory // but we couldn't. We proceeded with the GC and ended up not having enough // memory at the end. This is a legitimate OOM situtation. Otherwise we // probably made a mistake and didn't expand the heap when we should have. reason = oom_low_mem; } oom_info.reason = reason; oom_info.allocated = allocated; oom_info.reserved = reserved; oom_info.alloc_size = alloc_size; oom_info.gc_index = settings.gc_index; oom_info.fgm = fgm_result.fgm; oom_info.size = fgm_result.size; oom_info.available_pagefile_mb = fgm_result.available_pagefile_mb; oom_info.loh_p = fgm_result.loh_p; add_to_oom_history_per_heap(); fgm_result.fgm = fgm_no_failure; // Break early - before the more_space_lock is release so no other threads // could have allocated on the same heap when OOM happened. if (GCConfig::GetBreakOnOOM()) { GCToOSInterface::DebugBreak(); } } #ifdef BACKGROUND_GC BOOL gc_heap::background_allowed_p() { return ( gc_can_use_concurrent && ((settings.pause_mode == pause_interactive) || (settings.pause_mode == pause_sustained_low_latency)) ); } #endif //BACKGROUND_GC void gc_heap::check_for_full_gc (int gen_num, size_t size) { BOOL should_notify = FALSE; // if we detect full gc because of the allocation budget specified this is TRUE; // it's FALSE if it's due to other factors. BOOL alloc_factor = TRUE; int n_initial = gen_num; BOOL local_blocking_collection = FALSE; BOOL local_elevation_requested = FALSE; int new_alloc_remain_percent = 0; if (full_gc_approach_event_set) { return; } if (gen_num < max_generation) { gen_num = max_generation; } dynamic_data* dd_full = dynamic_data_of (gen_num); ptrdiff_t new_alloc_remain = 0; uint32_t pct = (gen_num >= uoh_start_generation) ? fgn_loh_percent : fgn_maxgen_percent; for (int gen_index = 0; gen_index < total_generation_count; gen_index++) { dprintf (2, ("FGN: h#%d: gen%d: %Id(%Id)", heap_number, gen_index, dd_new_allocation (dynamic_data_of (gen_index)), dd_desired_allocation (dynamic_data_of (gen_index)))); } // For small object allocations we only check every fgn_check_quantum bytes. 
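    // i.e. gen0 progress is sampled via fgn_last_alloc so the full-GC-notification math below only
    // runs once per fgn_check_quantum bytes of small-object allocation; the remaining budget is then
    // compared, as a percentage, against fgn_maxgen_percent / fgn_loh_percent.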
if (n_initial == 0) { dprintf (2, ("FGN: gen0 last recorded alloc: %Id", fgn_last_alloc)); dynamic_data* dd_0 = dynamic_data_of (n_initial); if (((fgn_last_alloc - dd_new_allocation (dd_0)) < fgn_check_quantum) && (dd_new_allocation (dd_0) >= 0)) { return; } else { fgn_last_alloc = dd_new_allocation (dd_0); dprintf (2, ("FGN: gen0 last recorded alloc is now: %Id", fgn_last_alloc)); } // We don't consider the size that came from soh 'cause it doesn't contribute to the // gen2 budget. size = 0; } int n = 0; for (int i = 1; i <= max_generation; i++) { if (get_new_allocation (i) <= 0) { n = i; } else break; } dprintf (2, ("FGN: h#%d: gen%d budget exceeded", heap_number, n)); if (gen_num == max_generation) { // If it's small object heap we should first see if we will even be looking at gen2 budget // in the next GC or not. If not we should go directly to checking other factors. if (n < (max_generation - 1)) { goto check_other_factors; } } new_alloc_remain = dd_new_allocation (dd_full) - size; new_alloc_remain_percent = (int)(((float)(new_alloc_remain) / (float)dd_desired_allocation (dd_full)) * 100); dprintf (2, ("FGN: alloc threshold for gen%d is %d%%, current threshold is %d%%", gen_num, pct, new_alloc_remain_percent)); if (new_alloc_remain_percent <= (int)pct) { #ifdef BACKGROUND_GC // If background GC is enabled, we still want to check whether this will // be a blocking GC or not because we only want to notify when it's a // blocking full GC. if (background_allowed_p()) { goto check_other_factors; } #endif //BACKGROUND_GC should_notify = TRUE; goto done; } check_other_factors: dprintf (2, ("FGC: checking other factors")); n = generation_to_condemn (n, &local_blocking_collection, &local_elevation_requested, TRUE); if (local_elevation_requested && (n == max_generation)) { if (settings.should_lock_elevation) { int local_elevation_locked_count = settings.elevation_locked_count + 1; if (local_elevation_locked_count != 6) { dprintf (2, ("FGN: lock count is %d - Condemning max_generation-1", local_elevation_locked_count)); n = max_generation - 1; } } } dprintf (2, ("FGN: we estimate gen%d will be collected", n)); #ifdef BACKGROUND_GC // When background GC is enabled it decreases the accuracy of our predictability - // by the time the GC happens, we may not be under BGC anymore. If we try to // predict often enough it should be ok. if ((n == max_generation) && (gc_heap::background_running_p())) { n = max_generation - 1; dprintf (2, ("FGN: bgc - 1 instead of 2")); } if ((n == max_generation) && !local_blocking_collection) { if (!background_allowed_p()) { local_blocking_collection = TRUE; } } #endif //BACKGROUND_GC dprintf (2, ("FGN: we estimate gen%d will be collected: %s", n, (local_blocking_collection ? "blocking" : "background"))); if ((n == max_generation) && local_blocking_collection) { alloc_factor = FALSE; should_notify = TRUE; goto done; } done: if (should_notify) { dprintf (2, ("FGN: gen%d detecting full GC approaching(%s) (GC#%d) (%Id%% left in gen%d)", n_initial, (alloc_factor ? 
"alloc" : "other"), dd_collection_count (dynamic_data_of (0)), new_alloc_remain_percent, gen_num)); send_full_gc_notification (n_initial, alloc_factor); } } void gc_heap::send_full_gc_notification (int gen_num, BOOL due_to_alloc_p) { if (!full_gc_approach_event_set) { assert (full_gc_approach_event.IsValid()); FIRE_EVENT(GCFullNotify_V1, gen_num, due_to_alloc_p); full_gc_end_event.Reset(); full_gc_approach_event.Set(); full_gc_approach_event_set = true; } } wait_full_gc_status gc_heap::full_gc_wait (GCEvent *event, int time_out_ms) { #ifdef MULTIPLE_HEAPS gc_heap* hp = gc_heap::g_heaps[0]; #else gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS if (hp->fgn_maxgen_percent == 0) { return wait_full_gc_na; } uint32_t wait_result = user_thread_wait(event, FALSE, time_out_ms); if ((wait_result == WAIT_OBJECT_0) || (wait_result == WAIT_TIMEOUT)) { if (hp->fgn_maxgen_percent == 0) { return wait_full_gc_cancelled; } if (wait_result == WAIT_OBJECT_0) { #ifdef BACKGROUND_GC if (fgn_last_gc_was_concurrent) { fgn_last_gc_was_concurrent = FALSE; return wait_full_gc_na; } else #endif //BACKGROUND_GC { return wait_full_gc_success; } } else { return wait_full_gc_timeout; } } else { return wait_full_gc_failed; } } size_t gc_heap::get_full_compact_gc_count() { return full_gc_counts[gc_type_compacting]; } // DTREVIEW - we should check this in dt_low_ephemeral_space_p // as well. inline BOOL gc_heap::short_on_end_of_seg (heap_segment* seg) { uint8_t* allocated = heap_segment_allocated (seg); #ifdef USE_REGIONS BOOL sufficient_p = sufficient_space_regions (end_gen0_region_space, end_space_after_gc()); #else BOOL sufficient_p = sufficient_space_end_seg (allocated, heap_segment_committed (seg), heap_segment_reserved (seg), end_space_after_gc()); #endif //USE_REGIONS if (!sufficient_p) { if (sufficient_gen0_space_p) { dprintf (GTC_LOG, ("gen0 has enough free space")); } sufficient_p = sufficient_gen0_space_p; } return !sufficient_p; } #ifdef _MSC_VER #pragma warning(disable:4706) // "assignment within conditional expression" is intentional in this function. 
#endif // _MSC_VER inline BOOL gc_heap::a_fit_free_list_p (int gen_number, size_t size, alloc_context* acontext, uint32_t flags, int align_const) { BOOL can_fit = FALSE; generation* gen = generation_of (gen_number); allocator* gen_allocator = generation_allocator (gen); for (unsigned int a_l_idx = gen_allocator->first_suitable_bucket(size); a_l_idx < gen_allocator->number_of_buckets(); a_l_idx++) { uint8_t* free_list = gen_allocator->alloc_list_head_of (a_l_idx); uint8_t* prev_free_item = 0; while (free_list != 0) { dprintf (3, ("considering free list %Ix", (size_t)free_list)); size_t free_list_size = unused_array_size (free_list); if ((size + Align (min_obj_size, align_const)) <= free_list_size) { dprintf (3, ("Found adequate unused area: [%Ix, size: %Id", (size_t)free_list, free_list_size)); gen_allocator->unlink_item (a_l_idx, free_list, prev_free_item, FALSE); // We ask for more Align (min_obj_size) // to make sure that we can insert a free object // in adjust_limit will set the limit lower size_t limit = limit_from_size (size, flags, free_list_size, gen_number, align_const); dd_new_allocation (dynamic_data_of (gen_number)) -= limit; uint8_t* remain = (free_list + limit); size_t remain_size = (free_list_size - limit); if (remain_size >= Align(min_free_list, align_const)) { make_unused_array (remain, remain_size); gen_allocator->thread_item_front (remain, remain_size); assert (remain_size >= Align (min_obj_size, align_const)); } else { //absorb the entire free list limit += remain_size; } generation_free_list_space (gen) -= limit; assert ((ptrdiff_t)generation_free_list_space (gen) >= 0); adjust_limit_clr (free_list, limit, size, acontext, flags, 0, align_const, gen_number); can_fit = TRUE; goto end; } else if (gen_allocator->discard_if_no_fit_p()) { assert (prev_free_item == 0); dprintf (3, ("couldn't use this free area, discarding")); generation_free_obj_space (gen) += free_list_size; gen_allocator->unlink_item (a_l_idx, free_list, prev_free_item, FALSE); generation_free_list_space (gen) -= free_list_size; assert ((ptrdiff_t)generation_free_list_space (gen) >= 0); } else { prev_free_item = free_list; } free_list = free_list_slot (free_list); } } end: return can_fit; } #ifdef BACKGROUND_GC void gc_heap::bgc_uoh_alloc_clr (uint8_t* alloc_start, size_t size, alloc_context* acontext, uint32_t flags, int gen_number, int align_const, int lock_index, BOOL check_used_p, heap_segment* seg) { make_unused_array (alloc_start, size); #ifdef DOUBLY_LINKED_FL clear_prev_bit (alloc_start, size); #endif //DOUBLY_LINKED_FL size_t size_of_array_base = sizeof(ArrayBase); bgc_alloc_lock->uoh_alloc_done_with_index (lock_index); // clear memory while not holding the lock. size_t size_to_skip = size_of_array_base; size_t size_to_clear = size - size_to_skip - plug_skew; size_t saved_size_to_clear = size_to_clear; if (check_used_p) { uint8_t* end = alloc_start + size - plug_skew; uint8_t* used = heap_segment_used (seg); if (used < end) { if ((alloc_start + size_to_skip) < used) { size_to_clear = used - (alloc_start + size_to_skip); } else { size_to_clear = 0; } dprintf (2, ("bgc uoh: setting used to %Ix", end)); heap_segment_used (seg) = end; } dprintf (2, ("bgc uoh: used: %Ix, alloc: %Ix, end of alloc: %Ix, clear %Id bytes", used, alloc_start, end, size_to_clear)); } else { dprintf (2, ("bgc uoh: [%Ix-[%Ix(%Id)", alloc_start, alloc_start+size, size)); } #ifdef VERIFY_HEAP // since we filled in 0xcc for free object when we verify heap, // we need to make sure we clear those bytes. 
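    // i.e. when HEAPVERIFY_GC is on, the free object created above was poisoned with 0xcc,
    // so the clear below must cover the originally computed span even if heap_segment_used
    // suggested a smaller range was enough.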
if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) { if (size_to_clear < saved_size_to_clear) { size_to_clear = saved_size_to_clear; } } #endif //VERIFY_HEAP size_t allocated_size = size - Align (min_obj_size, align_const); total_alloc_bytes_uoh += allocated_size; size_t etw_allocation_amount = 0; bool fire_event_p = update_alloc_info (gen_number, allocated_size, &etw_allocation_amount); dprintf (SPINLOCK_LOG, ("[%d]Lmsl to clear uoh obj", heap_number)); add_saved_spinlock_info (true, me_release, mt_clr_large_mem); leave_spin_lock (&more_space_lock_uoh); #ifdef FEATURE_EVENT_TRACE if (fire_event_p) { fire_etw_allocation_event (etw_allocation_amount, gen_number, alloc_start, size); } #endif //FEATURE_EVENT_TRACE ((void**) alloc_start)[-1] = 0; //clear the sync block if (!(flags & GC_ALLOC_ZEROING_OPTIONAL)) { memclr(alloc_start + size_to_skip, size_to_clear); } bgc_alloc_lock->uoh_alloc_set (alloc_start); acontext->alloc_ptr = alloc_start; acontext->alloc_limit = (alloc_start + size - Align (min_obj_size, align_const)); // need to clear the rest of the object before we hand it out. clear_unused_array(alloc_start, size); } #endif //BACKGROUND_GC BOOL gc_heap::a_fit_free_list_uoh_p (size_t size, alloc_context* acontext, uint32_t flags, int align_const, int gen_number) { BOOL can_fit = FALSE; generation* gen = generation_of (gen_number); allocator* allocator = generation_allocator (gen); #ifdef FEATURE_LOH_COMPACTION size_t loh_pad = gen_number == loh_generation ? Align (loh_padding_obj_size, align_const) : 0; #endif //FEATURE_LOH_COMPACTION #ifdef BACKGROUND_GC int cookie = -1; #endif //BACKGROUND_GC for (unsigned int a_l_idx = allocator->first_suitable_bucket(size); a_l_idx < allocator->number_of_buckets(); a_l_idx++) { uint8_t* free_list = allocator->alloc_list_head_of (a_l_idx); uint8_t* prev_free_item = 0; while (free_list != 0) { dprintf (3, ("considering free list %Ix", (size_t)free_list)); size_t free_list_size = unused_array_size(free_list); ptrdiff_t diff = free_list_size - size; #ifdef FEATURE_LOH_COMPACTION diff -= loh_pad; #endif //FEATURE_LOH_COMPACTION // must fit exactly or leave formattable space if ((diff == 0) || (diff >= (ptrdiff_t)Align (min_obj_size, align_const))) { #ifdef BACKGROUND_GC cookie = bgc_alloc_lock->uoh_alloc_set (free_list); bgc_track_uoh_alloc(); #endif //BACKGROUND_GC allocator->unlink_item (a_l_idx, free_list, prev_free_item, FALSE); remove_gen_free (gen_number, free_list_size); // Substract min obj size because limit_from_size adds it. 
Not needed for LOH size_t limit = limit_from_size (size - Align(min_obj_size, align_const), flags, free_list_size, gen_number, align_const); dd_new_allocation (dynamic_data_of (gen_number)) -= limit; #ifdef FEATURE_LOH_COMPACTION if (loh_pad) { make_unused_array (free_list, loh_pad); generation_free_obj_space (gen) += loh_pad; limit -= loh_pad; free_list += loh_pad; free_list_size -= loh_pad; } #endif //FEATURE_LOH_COMPACTION uint8_t* remain = (free_list + limit); size_t remain_size = (free_list_size - limit); if (remain_size != 0) { assert (remain_size >= Align (min_obj_size, align_const)); make_unused_array (remain, remain_size); } if (remain_size >= Align(min_free_list, align_const)) { uoh_thread_gap_front (remain, remain_size, gen); add_gen_free (gen_number, remain_size); assert (remain_size >= Align (min_obj_size, align_const)); } else { generation_free_obj_space (gen) += remain_size; } generation_free_list_space (gen) -= free_list_size; assert ((ptrdiff_t)generation_free_list_space (gen) >= 0); generation_free_list_allocated (gen) += limit; dprintf (3, ("found fit on loh at %Ix", free_list)); #ifdef BACKGROUND_GC if (cookie != -1) { bgc_uoh_alloc_clr (free_list, limit, acontext, flags, gen_number, align_const, cookie, FALSE, 0); } else #endif //BACKGROUND_GC { adjust_limit_clr (free_list, limit, size, acontext, flags, 0, align_const, gen_number); } //fix the limit to compensate for adjust_limit_clr making it too short acontext->alloc_limit += Align (min_obj_size, align_const); can_fit = TRUE; goto exit; } prev_free_item = free_list; free_list = free_list_slot (free_list); } } exit: return can_fit; } #ifdef _MSC_VER #pragma warning(default:4706) #endif // _MSC_VER BOOL gc_heap::a_fit_segment_end_p (int gen_number, heap_segment* seg, size_t size, alloc_context* acontext, uint32_t flags, int align_const, BOOL* commit_failed_p) { *commit_failed_p = FALSE; size_t limit = 0; bool hard_limit_short_seg_end_p = false; #ifdef BACKGROUND_GC int cookie = -1; #endif //BACKGROUND_GC uint8_t*& allocated = ((gen_number == 0) ? 
alloc_allocated : heap_segment_allocated(seg)); size_t pad = Align (min_obj_size, align_const); #ifdef FEATURE_LOH_COMPACTION size_t loh_pad = Align (loh_padding_obj_size, align_const); if (gen_number == loh_generation) { pad += loh_pad; } #endif //FEATURE_LOH_COMPACTION uint8_t* end = heap_segment_committed (seg) - pad; if (a_size_fit_p (size, allocated, end, align_const)) { limit = limit_from_size (size, flags, (end - allocated), gen_number, align_const); goto found_fit; } end = heap_segment_reserved (seg) - pad; if ((heap_segment_reserved (seg) != heap_segment_committed (seg)) && (a_size_fit_p (size, allocated, end, align_const))) { limit = limit_from_size (size, flags, (end - allocated), gen_number, align_const); if (grow_heap_segment (seg, (allocated + limit), &hard_limit_short_seg_end_p)) { goto found_fit; } else { if (!hard_limit_short_seg_end_p) { dprintf (2, ("can't grow segment, doing a full gc")); *commit_failed_p = TRUE; } else { assert (heap_hard_limit); } } } goto found_no_fit; found_fit: dd_new_allocation (dynamic_data_of (gen_number)) -= limit; #ifdef BACKGROUND_GC if (gen_number != 0) { cookie = bgc_alloc_lock->uoh_alloc_set (allocated); bgc_track_uoh_alloc(); } #endif //BACKGROUND_GC #ifdef FEATURE_LOH_COMPACTION if (gen_number == loh_generation) { make_unused_array (allocated, loh_pad); generation_free_obj_space (generation_of (gen_number)) += loh_pad; allocated += loh_pad; limit -= loh_pad; } #endif //FEATURE_LOH_COMPACTION #if defined (VERIFY_HEAP) && defined (_DEBUG) // we are responsible for cleaning the syncblock and we will do it later // as a part of cleanup routine and when not holding the heap lock. // However, once we move "allocated" forward and if another thread initiate verification of // the previous object, it may consider the syncblock in the "next" eligible for validation. // (see also: object.cpp/Object::ValidateInner) // Make sure it will see cleaned up state to prevent triggering occasional verification failures. // And make sure the write happens before updating "allocated" VolatileStore(((void**)allocated - 1), (void*)0); //clear the sync block #endif //VERIFY_HEAP && _DEBUG uint8_t* old_alloc; old_alloc = allocated; dprintf (3, ("found fit at end of seg: %Ix", old_alloc)); #ifdef BACKGROUND_GC if (cookie != -1) { allocated += limit; bgc_uoh_alloc_clr (old_alloc, limit, acontext, flags, gen_number, align_const, cookie, TRUE, seg); } else #endif //BACKGROUND_GC { // In a contiguous AC case with GC_ALLOC_ZEROING_OPTIONAL, deduct unspent space from the limit to // clear only what is necessary. if ((flags & GC_ALLOC_ZEROING_OPTIONAL) && ((allocated == acontext->alloc_limit) || (allocated == (acontext->alloc_limit + Align (min_obj_size, align_const))))) { assert(gen_number == 0); assert(allocated > acontext->alloc_ptr); size_t extra = allocated - acontext->alloc_ptr; limit -= extra; // Since we are not consuming all the memory we already deducted from the budget, // we should put the extra back. 
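            // The whole limit was subtracted from dd_new_allocation at found_fit, so the part of
            // the old alloc context we are not going to consume (extra) is credited back here,
            // and one min-object of padding is re-added for the AC continuity divider.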
dynamic_data* dd = dynamic_data_of (0); dd_new_allocation (dd) += extra; // add space for an AC continuity divider limit += Align(min_obj_size, align_const); } allocated += limit; adjust_limit_clr (old_alloc, limit, size, acontext, flags, seg, align_const, gen_number); } return TRUE; found_no_fit: return FALSE; } BOOL gc_heap::uoh_a_fit_segment_end_p (int gen_number, size_t size, alloc_context* acontext, uint32_t flags, int align_const, BOOL* commit_failed_p, oom_reason* oom_r) { *commit_failed_p = FALSE; generation* gen = generation_of (gen_number); heap_segment* seg = generation_allocation_segment (gen); BOOL can_allocate_p = FALSE; while (seg) { #ifdef BACKGROUND_GC if (seg->flags & heap_segment_flags_uoh_delete) { dprintf (3, ("h%d skipping seg %Ix to be deleted", heap_number, (size_t)seg)); } else #endif //BACKGROUND_GC { if (a_fit_segment_end_p (gen_number, seg, (size - Align (min_obj_size, align_const)), acontext, flags, align_const, commit_failed_p)) { acontext->alloc_limit += Align (min_obj_size, align_const); can_allocate_p = TRUE; break; } if (*commit_failed_p) { *oom_r = oom_cant_commit; break; } } seg = heap_segment_next_rw (seg); } if (can_allocate_p) { generation_end_seg_allocated (gen) += size; } return can_allocate_p; } #ifdef BACKGROUND_GC inline void gc_heap::wait_for_background (alloc_wait_reason awr, bool loh_p) { GCSpinLock* msl = loh_p ? &more_space_lock_uoh : &more_space_lock_soh; dprintf (2, ("BGC is already in progress, waiting for it to finish")); add_saved_spinlock_info (loh_p, me_release, mt_wait_bgc); leave_spin_lock (msl); background_gc_wait (awr); enter_spin_lock (msl); add_saved_spinlock_info (loh_p, me_acquire, mt_wait_bgc); } bool gc_heap::wait_for_bgc_high_memory (alloc_wait_reason awr, bool loh_p) { bool wait_p = false; if (gc_heap::background_running_p()) { uint32_t memory_load; get_memory_info (&memory_load); if (memory_load >= m_high_memory_load_th) { wait_p = true; dprintf (GTC_LOG, ("high mem - wait for BGC to finish, wait reason: %d", awr)); wait_for_background (awr, loh_p); } } return wait_p; } #endif //BACKGROUND_GC // We request to trigger an ephemeral GC but we may get a full compacting GC. // return TRUE if that's the case. 
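// Detection is indirect: full_gc_counts[gc_type_compacting] is sampled before and after the
// GarbageCollectGeneration call, and any increase means the gen1 request was escalated, which
// lets the allocation state machine move to a_state_try_fit_after_cg instead of retrying.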
BOOL gc_heap::trigger_ephemeral_gc (gc_reason gr) { #ifdef BACKGROUND_GC wait_for_bgc_high_memory (awr_loh_oos_bgc, false); #endif //BACKGROUND_GC BOOL did_full_compact_gc = FALSE; dprintf (1, ("h%d triggering a gen1 GC", heap_number)); size_t last_full_compact_gc_count = get_full_compact_gc_count(); vm_heap->GarbageCollectGeneration(max_generation - 1, gr); #ifdef MULTIPLE_HEAPS enter_spin_lock (&more_space_lock_soh); add_saved_spinlock_info (false, me_acquire, mt_t_eph_gc); #endif //MULTIPLE_HEAPS size_t current_full_compact_gc_count = get_full_compact_gc_count(); if (current_full_compact_gc_count > last_full_compact_gc_count) { dprintf (2, ("attempted to trigger an ephemeral GC and got a full compacting GC")); did_full_compact_gc = TRUE; } return did_full_compact_gc; } BOOL gc_heap::soh_try_fit (int gen_number, size_t size, alloc_context* acontext, uint32_t flags, int align_const, BOOL* commit_failed_p, BOOL* short_seg_end_p) { BOOL can_allocate = TRUE; if (short_seg_end_p) { *short_seg_end_p = FALSE; } can_allocate = a_fit_free_list_p (gen_number, size, acontext, flags, align_const); if (!can_allocate) { if (short_seg_end_p) { *short_seg_end_p = short_on_end_of_seg (ephemeral_heap_segment); } // If the caller doesn't care, we always try to fit at the end of seg; // otherwise we would only try if we are actually not short at end of seg. if (!short_seg_end_p || !(*short_seg_end_p)) { #ifdef USE_REGIONS while (ephemeral_heap_segment) #endif //USE_REGIONS { can_allocate = a_fit_segment_end_p (gen_number, ephemeral_heap_segment, size, acontext, flags, align_const, commit_failed_p); #ifdef USE_REGIONS if (can_allocate) { break; } dprintf (REGIONS_LOG, ("h%d fixing region %Ix end to alloc ptr: %Ix, alloc_allocated %Ix", heap_number, heap_segment_mem (ephemeral_heap_segment), acontext->alloc_ptr, alloc_allocated)); fix_allocation_context (acontext, TRUE, FALSE); fix_youngest_allocation_area(); heap_segment* next_seg = heap_segment_next (ephemeral_heap_segment); bool new_seg = false; if (!next_seg) { assert (ephemeral_heap_segment == generation_tail_region (generation_of (gen_number))); next_seg = get_new_region (gen_number); new_seg = true; } if (next_seg) { dprintf (REGIONS_LOG, ("eph seg %Ix -> next %Ix", heap_segment_mem (ephemeral_heap_segment), heap_segment_mem (next_seg))); ephemeral_heap_segment = next_seg; if (new_seg) { GCToEEInterface::DiagAddNewRegion( heap_segment_gen_num (next_seg), heap_segment_mem (next_seg), heap_segment_allocated (next_seg), heap_segment_reserved (next_seg) ); } } else { *commit_failed_p = TRUE; dprintf (REGIONS_LOG, ("couldn't get a new ephemeral region")); return FALSE; } alloc_allocated = heap_segment_allocated (ephemeral_heap_segment); dprintf (REGIONS_LOG, ("h%d alloc_allocated is now %Ix", heap_number, alloc_allocated)); #endif //USE_REGIONS } } } return can_allocate; } allocation_state gc_heap::allocate_soh (int gen_number, size_t size, alloc_context* acontext, uint32_t flags, int align_const) { #if defined (BACKGROUND_GC) && !defined (MULTIPLE_HEAPS) if (gc_heap::background_running_p()) { background_soh_alloc_count++; if ((background_soh_alloc_count % bgc_alloc_spin_count) == 0) { add_saved_spinlock_info (false, me_release, mt_alloc_small); leave_spin_lock (&more_space_lock_soh); bool cooperative_mode = enable_preemptive(); GCToOSInterface::Sleep (bgc_alloc_spin); disable_preemptive (cooperative_mode); enter_spin_lock (&more_space_lock_soh); add_saved_spinlock_info (false, me_acquire, mt_alloc_small); } else { //GCToOSInterface::YieldThread (0); } } 
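    // While a background GC is running on a single-heap build, every bgc_alloc_spin_count-th SOH
    // allocation above drops more_space_lock_soh, sleeps for bgc_alloc_spin in preemptive mode so
    // the BGC thread can make progress, and then re-takes the lock before carrying on.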
#endif //BACKGROUND_GC && !MULTIPLE_HEAPS gc_reason gr = reason_oos_soh; oom_reason oom_r = oom_no_failure; // No variable values should be "carried over" from one state to the other. // That's why there are local variable for each state allocation_state soh_alloc_state = a_state_start; // If we can get a new seg it means allocation will succeed. while (1) { dprintf (3, ("[h%d]soh state is %s", heap_number, allocation_state_str[soh_alloc_state])); switch (soh_alloc_state) { case a_state_can_allocate: case a_state_cant_allocate: { goto exit; } case a_state_start: { soh_alloc_state = a_state_try_fit; break; } case a_state_try_fit: { BOOL commit_failed_p = FALSE; BOOL can_use_existing_p = FALSE; can_use_existing_p = soh_try_fit (gen_number, size, acontext, flags, align_const, &commit_failed_p, NULL); soh_alloc_state = (can_use_existing_p ? a_state_can_allocate : (commit_failed_p ? a_state_trigger_full_compact_gc : a_state_trigger_ephemeral_gc)); break; } case a_state_try_fit_after_bgc: { BOOL commit_failed_p = FALSE; BOOL can_use_existing_p = FALSE; BOOL short_seg_end_p = FALSE; can_use_existing_p = soh_try_fit (gen_number, size, acontext, flags, align_const, &commit_failed_p, &short_seg_end_p); soh_alloc_state = (can_use_existing_p ? a_state_can_allocate : (short_seg_end_p ? a_state_trigger_2nd_ephemeral_gc : a_state_trigger_full_compact_gc)); break; } case a_state_try_fit_after_cg: { BOOL commit_failed_p = FALSE; BOOL can_use_existing_p = FALSE; BOOL short_seg_end_p = FALSE; can_use_existing_p = soh_try_fit (gen_number, size, acontext, flags, align_const, &commit_failed_p, &short_seg_end_p); if (can_use_existing_p) { soh_alloc_state = a_state_can_allocate; } #ifdef MULTIPLE_HEAPS else if (gen0_allocated_after_gc_p) { // some other threads already grabbed the more space lock and allocated // so we should attempt an ephemeral GC again. soh_alloc_state = a_state_trigger_ephemeral_gc; } #endif //MULTIPLE_HEAPS else if (short_seg_end_p) { soh_alloc_state = a_state_cant_allocate; oom_r = oom_budget; } else { assert (commit_failed_p || heap_hard_limit); soh_alloc_state = a_state_cant_allocate; oom_r = oom_cant_commit; } break; } case a_state_check_and_wait_for_bgc: { BOOL bgc_in_progress_p = FALSE; BOOL did_full_compacting_gc = FALSE; bgc_in_progress_p = check_and_wait_for_bgc (awr_gen0_oos_bgc, &did_full_compacting_gc, false); soh_alloc_state = (did_full_compacting_gc ? a_state_try_fit_after_cg : a_state_try_fit_after_bgc); break; } case a_state_trigger_ephemeral_gc: { BOOL commit_failed_p = FALSE; BOOL can_use_existing_p = FALSE; BOOL short_seg_end_p = FALSE; BOOL bgc_in_progress_p = FALSE; BOOL did_full_compacting_gc = FALSE; did_full_compacting_gc = trigger_ephemeral_gc (gr); if (did_full_compacting_gc) { soh_alloc_state = a_state_try_fit_after_cg; } else { can_use_existing_p = soh_try_fit (gen_number, size, acontext, flags, align_const, &commit_failed_p, &short_seg_end_p); #ifdef BACKGROUND_GC bgc_in_progress_p = gc_heap::background_running_p(); #endif //BACKGROUND_GC if (can_use_existing_p) { soh_alloc_state = a_state_can_allocate; } else { if (short_seg_end_p) { if (should_expand_in_full_gc) { dprintf (2, ("gen1 GC wanted to expand!")); soh_alloc_state = a_state_trigger_full_compact_gc; } else { soh_alloc_state = (bgc_in_progress_p ? 
a_state_check_and_wait_for_bgc : a_state_trigger_full_compact_gc); } } else if (commit_failed_p) { soh_alloc_state = a_state_trigger_full_compact_gc; } else { #ifdef MULTIPLE_HEAPS // some other threads already grabbed the more space lock and allocated // so we should attempt an ephemeral GC again. assert (gen0_allocated_after_gc_p); soh_alloc_state = a_state_trigger_ephemeral_gc; #else //MULTIPLE_HEAPS assert (!"shouldn't get here"); #endif //MULTIPLE_HEAPS } } } break; } case a_state_trigger_2nd_ephemeral_gc: { BOOL commit_failed_p = FALSE; BOOL can_use_existing_p = FALSE; BOOL short_seg_end_p = FALSE; BOOL did_full_compacting_gc = FALSE; did_full_compacting_gc = trigger_ephemeral_gc (gr); if (did_full_compacting_gc) { soh_alloc_state = a_state_try_fit_after_cg; } else { can_use_existing_p = soh_try_fit (gen_number, size, acontext, flags, align_const, &commit_failed_p, &short_seg_end_p); if (short_seg_end_p || commit_failed_p) { soh_alloc_state = a_state_trigger_full_compact_gc; } else { assert (can_use_existing_p); soh_alloc_state = a_state_can_allocate; } } break; } case a_state_trigger_full_compact_gc: { if (fgn_maxgen_percent) { dprintf (2, ("FGN: SOH doing last GC before we throw OOM")); send_full_gc_notification (max_generation, FALSE); } BOOL got_full_compacting_gc = FALSE; got_full_compacting_gc = trigger_full_compact_gc (gr, &oom_r, false); soh_alloc_state = (got_full_compacting_gc ? a_state_try_fit_after_cg : a_state_cant_allocate); break; } default: { assert (!"Invalid state!"); break; } } } exit: if (soh_alloc_state == a_state_cant_allocate) { assert (oom_r != oom_no_failure); handle_oom (oom_r, size, heap_segment_allocated (ephemeral_heap_segment), heap_segment_reserved (ephemeral_heap_segment)); add_saved_spinlock_info (false, me_release, mt_alloc_small_cant); leave_spin_lock (&more_space_lock_soh); } assert ((soh_alloc_state == a_state_can_allocate) || (soh_alloc_state == a_state_cant_allocate) || (soh_alloc_state == a_state_retry_allocate)); return soh_alloc_state; } #ifdef BACKGROUND_GC inline void gc_heap::bgc_track_uoh_alloc() { if (current_c_gc_state == c_gc_state_planning) { Interlocked::Increment (&uoh_alloc_thread_count); dprintf (3, ("h%d: inc lc: %d", heap_number, (int32_t)uoh_alloc_thread_count)); } } inline void gc_heap::bgc_untrack_uoh_alloc() { if (current_c_gc_state == c_gc_state_planning) { Interlocked::Decrement (&uoh_alloc_thread_count); dprintf (3, ("h%d: dec lc: %d", heap_number, (int32_t)uoh_alloc_thread_count)); } } int bgc_allocate_spin(size_t min_gc_size, size_t bgc_begin_size, size_t bgc_size_increased, size_t end_size) { if ((bgc_begin_size + bgc_size_increased) < (min_gc_size * 10)) { // just do it, no spinning return 0; } if ((bgc_begin_size >= (2 * end_size)) || (bgc_size_increased >= bgc_begin_size)) { if (bgc_begin_size >= (2 * end_size)) { dprintf (3, ("alloc-ed too much before bgc started")); } else { dprintf (3, ("alloc-ed too much after bgc started")); } // -1 means wait for bgc return -1; } else { return (int)(((float)bgc_size_increased / (float)bgc_begin_size) * 10); } } int gc_heap::bgc_loh_allocate_spin() { size_t min_gc_size = dd_min_size (dynamic_data_of (loh_generation)); size_t bgc_begin_size = bgc_begin_loh_size; size_t bgc_size_increased = bgc_loh_size_increased; size_t end_size = end_loh_size; return bgc_allocate_spin(min_gc_size, bgc_begin_size, bgc_size_increased, end_size); } int gc_heap::bgc_poh_allocate_spin() { size_t min_gc_size = dd_min_size (dynamic_data_of (poh_generation)); size_t bgc_begin_size = bgc_begin_poh_size; 
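    // Illustrative worked example for bgc_allocate_spin above (numbers are hypothetical, not
    // taken from the runtime): with min_gc_size = 8MB, bgc_begin_size = 100MB,
    // bgc_size_increased = 30MB and end_size = 80MB, the "just do it, no spinning" early-out
    // does not apply (130MB >= 80MB), neither "alloc-ed too much" condition fires
    // (100 < 2*80 and 30 < 100), so the helper returns (int)((30.0f / 100.0f) * 10) == 3,
    // which allocate_uoh passes to GCToOSInterface::YieldThread.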
size_t bgc_size_increased = bgc_poh_size_increased; size_t end_size = end_poh_size; return bgc_allocate_spin(min_gc_size, bgc_begin_size, bgc_size_increased, end_size); } #endif //BACKGROUND_GC size_t gc_heap::get_uoh_seg_size (size_t size) { size_t default_seg_size = #ifdef USE_REGIONS global_region_allocator.get_large_region_alignment(); #else min_uoh_segment_size; #endif //USE_REGIONS size_t align_size = default_seg_size; int align_const = get_alignment_constant (FALSE); size_t large_seg_size = align_on_page ( max (default_seg_size, ((size + 2 * Align(min_obj_size, align_const) + OS_PAGE_SIZE + align_size) / align_size * align_size))); return large_seg_size; } BOOL gc_heap::uoh_get_new_seg (int gen_number, size_t size, BOOL* did_full_compact_gc, oom_reason* oom_r) { *did_full_compact_gc = FALSE; size_t seg_size = get_uoh_seg_size (size); heap_segment* new_seg = get_uoh_segment (gen_number, seg_size, did_full_compact_gc); if (new_seg && (gen_number == loh_generation)) { loh_alloc_since_cg += seg_size; } else { *oom_r = oom_loh; } return (new_seg != 0); } // PERF TODO: this is too aggressive; and in hard limit we should // count the actual allocated bytes instead of only updating it during // getting a new seg. BOOL gc_heap::retry_full_compact_gc (size_t size) { size_t seg_size = get_uoh_seg_size (size); if (loh_alloc_since_cg >= (2 * (uint64_t)seg_size)) { return TRUE; } #ifdef MULTIPLE_HEAPS uint64_t total_alloc_size = 0; for (int i = 0; i < n_heaps; i++) { total_alloc_size += g_heaps[i]->loh_alloc_since_cg; } if (total_alloc_size >= (2 * (uint64_t)seg_size)) { return TRUE; } #endif //MULTIPLE_HEAPS return FALSE; } BOOL gc_heap::check_and_wait_for_bgc (alloc_wait_reason awr, BOOL* did_full_compact_gc, bool loh_p) { BOOL bgc_in_progress = FALSE; *did_full_compact_gc = FALSE; #ifdef BACKGROUND_GC if (gc_heap::background_running_p()) { bgc_in_progress = TRUE; size_t last_full_compact_gc_count = get_full_compact_gc_count(); wait_for_background (awr, loh_p); size_t current_full_compact_gc_count = get_full_compact_gc_count(); if (current_full_compact_gc_count > last_full_compact_gc_count) { *did_full_compact_gc = TRUE; } } #endif //BACKGROUND_GC return bgc_in_progress; } BOOL gc_heap::uoh_try_fit (int gen_number, size_t size, alloc_context* acontext, uint32_t flags, int align_const, BOOL* commit_failed_p, oom_reason* oom_r) { BOOL can_allocate = TRUE; if (!a_fit_free_list_uoh_p (size, acontext, flags, align_const, gen_number)) { can_allocate = uoh_a_fit_segment_end_p (gen_number, size, acontext, flags, align_const, commit_failed_p, oom_r); #ifdef BACKGROUND_GC if (can_allocate && gc_heap::background_running_p()) { if (gen_number == poh_generation) { bgc_poh_size_increased += size; } else { bgc_loh_size_increased += size; } } #endif //BACKGROUND_GC } return can_allocate; } BOOL gc_heap::trigger_full_compact_gc (gc_reason gr, oom_reason* oom_r, bool loh_p) { BOOL did_full_compact_gc = FALSE; size_t last_full_compact_gc_count = get_full_compact_gc_count(); // Set this so the next GC will be a full compacting GC. if (!last_gc_before_oom) { last_gc_before_oom = TRUE; } #ifdef BACKGROUND_GC if (gc_heap::background_running_p()) { wait_for_background (((gr == reason_oos_soh) ? awr_gen0_oos_bgc : awr_loh_oos_bgc), loh_p); dprintf (2, ("waited for BGC - done")); } #endif //BACKGROUND_GC GCSpinLock* msl = loh_p ? 
&more_space_lock_uoh : &more_space_lock_soh; size_t current_full_compact_gc_count = get_full_compact_gc_count(); if (current_full_compact_gc_count > last_full_compact_gc_count) { dprintf (3, ("a full compacting GC triggered while waiting for BGC (%d->%d)", last_full_compact_gc_count, current_full_compact_gc_count)); assert (current_full_compact_gc_count > last_full_compact_gc_count); did_full_compact_gc = TRUE; goto exit; } dprintf (3, ("h%d full GC", heap_number)); trigger_gc_for_alloc (max_generation, gr, msl, loh_p, mt_t_full_gc); current_full_compact_gc_count = get_full_compact_gc_count(); if (current_full_compact_gc_count == last_full_compact_gc_count) { dprintf (2, ("attempted to trigger a full compacting GC but didn't get it")); // We requested a full GC but didn't get because of the elevation logic // which means we should fail. *oom_r = oom_unproductive_full_gc; } else { dprintf (3, ("h%d: T full compacting GC (%d->%d)", heap_number, last_full_compact_gc_count, current_full_compact_gc_count)); assert (current_full_compact_gc_count > last_full_compact_gc_count); did_full_compact_gc = TRUE; } exit: return did_full_compact_gc; } #ifdef RECORD_LOH_STATE void gc_heap::add_saved_loh_state (allocation_state loh_state_to_save, EEThreadId thread_id) { // When the state is can_allocate we already have released the more // space lock. So we are not logging states here since this code // is not thread safe. if (loh_state_to_save != a_state_can_allocate) { last_loh_states[loh_state_index].alloc_state = loh_state_to_save; last_loh_states[loh_state_index].thread_id = thread_id; loh_state_index++; if (loh_state_index == max_saved_loh_states) { loh_state_index = 0; } assert (loh_state_index < max_saved_loh_states); } } #endif //RECORD_LOH_STATE bool gc_heap::should_retry_other_heap (int gen_number, size_t size) { #ifdef MULTIPLE_HEAPS if (heap_hard_limit) { size_t min_size = dd_min_size (g_heaps[0]->dynamic_data_of (gen_number)); size_t slack_space = max (commit_min_th, min_size); bool retry_p = ((current_total_committed + size) < (heap_hard_limit - slack_space)); dprintf (1, ("%Id - %Id - total committed %Id - size %Id = %Id, %s", heap_hard_limit, slack_space, current_total_committed, size, (heap_hard_limit - slack_space - current_total_committed - size), (retry_p ? "retry" : "no retry"))); return retry_p; } else #endif //MULTIPLE_HEAPS { return false; } } allocation_state gc_heap::allocate_uoh (int gen_number, size_t size, alloc_context* acontext, uint32_t flags, int align_const) { #ifdef BACKGROUND_GC if (gc_heap::background_running_p()) { #ifdef BGC_SERVO_TUNING bool planning_p = (current_c_gc_state == c_gc_state_planning); #endif //BGC_SERVO_TUNING background_uoh_alloc_count++; //if ((background_loh_alloc_count % bgc_alloc_spin_count_loh) == 0) { #ifdef BGC_SERVO_TUNING if (planning_p) { loh_a_bgc_planning += size; } else { loh_a_bgc_marking += size; } #endif //BGC_SERVO_TUNING int spin_for_allocation = (gen_number == loh_generation) ? 
bgc_loh_allocate_spin() : bgc_poh_allocate_spin(); if (spin_for_allocation > 0) { add_saved_spinlock_info (true, me_release, mt_alloc_large); leave_spin_lock (&more_space_lock_uoh); bool cooperative_mode = enable_preemptive(); GCToOSInterface::YieldThread (spin_for_allocation); disable_preemptive (cooperative_mode); enter_spin_lock (&more_space_lock_uoh); add_saved_spinlock_info (true, me_acquire, mt_alloc_large); dprintf (SPINLOCK_LOG, ("[%d]spin Emsl uoh", heap_number)); } else if (spin_for_allocation < 0) { wait_for_background (awr_uoh_alloc_during_bgc, true); } } } #ifdef BGC_SERVO_TUNING else { loh_a_no_bgc += size; } #endif //BGC_SERVO_TUNING #endif //BACKGROUND_GC gc_reason gr = reason_oos_loh; generation* gen = generation_of (gen_number); oom_reason oom_r = oom_no_failure; size_t current_full_compact_gc_count = 0; // No variable values should be "carried over" from one state to the other. // That's why there are local variable for each state allocation_state uoh_alloc_state = a_state_start; #ifdef RECORD_LOH_STATE EEThreadId current_thread_id; current_thread_id.SetToCurrentThread(); #endif //RECORD_LOH_STATE // If we can get a new seg it means allocation will succeed. while (1) { dprintf (3, ("[h%d]loh state is %s", heap_number, allocation_state_str[uoh_alloc_state])); #ifdef RECORD_LOH_STATE add_saved_loh_state (loh_uoh_alloc_state, current_thread_id); #endif //RECORD_LOH_STATE switch (uoh_alloc_state) { case a_state_can_allocate: case a_state_cant_allocate: { goto exit; } case a_state_start: { uoh_alloc_state = a_state_try_fit; break; } case a_state_try_fit: { BOOL commit_failed_p = FALSE; BOOL can_use_existing_p = FALSE; can_use_existing_p = uoh_try_fit (gen_number, size, acontext, flags, align_const, &commit_failed_p, &oom_r); uoh_alloc_state = (can_use_existing_p ? a_state_can_allocate : (commit_failed_p ? a_state_trigger_full_compact_gc : a_state_acquire_seg)); assert ((uoh_alloc_state == a_state_can_allocate) == (acontext->alloc_ptr != 0)); break; } case a_state_try_fit_new_seg: { BOOL commit_failed_p = FALSE; BOOL can_use_existing_p = FALSE; can_use_existing_p = uoh_try_fit (gen_number, size, acontext, flags, align_const, &commit_failed_p, &oom_r); // Even after we got a new seg it doesn't necessarily mean we can allocate, // another LOH allocating thread could have beat us to acquire the msl so // we need to try again. uoh_alloc_state = (can_use_existing_p ? a_state_can_allocate : a_state_try_fit); assert ((uoh_alloc_state == a_state_can_allocate) == (acontext->alloc_ptr != 0)); break; } case a_state_try_fit_after_cg: { BOOL commit_failed_p = FALSE; BOOL can_use_existing_p = FALSE; can_use_existing_p = uoh_try_fit (gen_number, size, acontext, flags, align_const, &commit_failed_p, &oom_r); // If we failed to commit, we bail right away 'cause we already did a // full compacting GC. uoh_alloc_state = (can_use_existing_p ? a_state_can_allocate : (commit_failed_p ? a_state_cant_allocate : a_state_acquire_seg_after_cg)); assert ((uoh_alloc_state == a_state_can_allocate) == (acontext->alloc_ptr != 0)); break; } case a_state_try_fit_after_bgc: { BOOL commit_failed_p = FALSE; BOOL can_use_existing_p = FALSE; can_use_existing_p = uoh_try_fit (gen_number, size, acontext, flags, align_const, &commit_failed_p, &oom_r); uoh_alloc_state = (can_use_existing_p ? a_state_can_allocate : (commit_failed_p ? 
a_state_trigger_full_compact_gc : a_state_acquire_seg_after_bgc)); assert ((uoh_alloc_state == a_state_can_allocate) == (acontext->alloc_ptr != 0)); break; } case a_state_acquire_seg: { BOOL can_get_new_seg_p = FALSE; BOOL did_full_compacting_gc = FALSE; current_full_compact_gc_count = get_full_compact_gc_count(); can_get_new_seg_p = uoh_get_new_seg (gen_number, size, &did_full_compacting_gc, &oom_r); uoh_alloc_state = (can_get_new_seg_p ? a_state_try_fit_new_seg : (did_full_compacting_gc ? a_state_check_retry_seg : a_state_check_and_wait_for_bgc)); break; } case a_state_acquire_seg_after_cg: { BOOL can_get_new_seg_p = FALSE; BOOL did_full_compacting_gc = FALSE; current_full_compact_gc_count = get_full_compact_gc_count(); can_get_new_seg_p = uoh_get_new_seg (gen_number, size, &did_full_compacting_gc, &oom_r); // Since we release the msl before we try to allocate a seg, other // threads could have allocated a bunch of segments before us so // we might need to retry. uoh_alloc_state = (can_get_new_seg_p ? a_state_try_fit_after_cg : a_state_check_retry_seg); break; } case a_state_acquire_seg_after_bgc: { BOOL can_get_new_seg_p = FALSE; BOOL did_full_compacting_gc = FALSE; current_full_compact_gc_count = get_full_compact_gc_count(); can_get_new_seg_p = uoh_get_new_seg (gen_number, size, &did_full_compacting_gc, &oom_r); uoh_alloc_state = (can_get_new_seg_p ? a_state_try_fit_new_seg : (did_full_compacting_gc ? a_state_check_retry_seg : a_state_trigger_full_compact_gc)); assert ((uoh_alloc_state != a_state_cant_allocate) || (oom_r != oom_no_failure)); break; } case a_state_check_and_wait_for_bgc: { BOOL bgc_in_progress_p = FALSE; BOOL did_full_compacting_gc = FALSE; bgc_in_progress_p = check_and_wait_for_bgc (awr_loh_oos_bgc, &did_full_compacting_gc, true); uoh_alloc_state = (!bgc_in_progress_p ? a_state_trigger_full_compact_gc : (did_full_compacting_gc ? a_state_try_fit_after_cg : a_state_try_fit_after_bgc)); break; } case a_state_trigger_full_compact_gc: { if (fgn_maxgen_percent) { dprintf (2, ("FGN: LOH doing last GC before we throw OOM")); send_full_gc_notification (max_generation, FALSE); } BOOL got_full_compacting_gc = FALSE; got_full_compacting_gc = trigger_full_compact_gc (gr, &oom_r, true); uoh_alloc_state = (got_full_compacting_gc ? a_state_try_fit_after_cg : a_state_cant_allocate); assert ((uoh_alloc_state != a_state_cant_allocate) || (oom_r != oom_no_failure)); break; } case a_state_check_retry_seg: { BOOL should_retry_gc = retry_full_compact_gc (size); BOOL should_retry_get_seg = FALSE; if (!should_retry_gc) { size_t last_full_compact_gc_count = current_full_compact_gc_count; current_full_compact_gc_count = get_full_compact_gc_count(); if (current_full_compact_gc_count > last_full_compact_gc_count) { should_retry_get_seg = TRUE; } } uoh_alloc_state = (should_retry_gc ? a_state_trigger_full_compact_gc : (should_retry_get_seg ? 
a_state_try_fit_after_cg : a_state_cant_allocate)); assert ((uoh_alloc_state != a_state_cant_allocate) || (oom_r != oom_no_failure)); break; } default: { assert (!"Invalid state!"); break; } } } exit: if (uoh_alloc_state == a_state_cant_allocate) { assert (oom_r != oom_no_failure); if ((oom_r != oom_cant_commit) && should_retry_other_heap (gen_number, size)) { uoh_alloc_state = a_state_retry_allocate; } else { handle_oom (oom_r, size, 0, 0); } add_saved_spinlock_info (true, me_release, mt_alloc_large_cant); leave_spin_lock (&more_space_lock_uoh); } assert ((uoh_alloc_state == a_state_can_allocate) || (uoh_alloc_state == a_state_cant_allocate) || (uoh_alloc_state == a_state_retry_allocate)); return uoh_alloc_state; } // BGC's final mark phase will acquire the msl, so release it here and re-acquire. void gc_heap::trigger_gc_for_alloc (int gen_number, gc_reason gr, GCSpinLock* msl, bool loh_p, msl_take_state take_state) { #ifdef BACKGROUND_GC if (loh_p) { add_saved_spinlock_info (loh_p, me_release, take_state); leave_spin_lock (msl); } #endif //BACKGROUND_GC vm_heap->GarbageCollectGeneration (gen_number, gr); #ifdef MULTIPLE_HEAPS if (!loh_p) { enter_spin_lock (msl); add_saved_spinlock_info (loh_p, me_acquire, take_state); } #endif //MULTIPLE_HEAPS #ifdef BACKGROUND_GC if (loh_p) { enter_spin_lock (msl); add_saved_spinlock_info (loh_p, me_acquire, take_state); } #endif //BACKGROUND_GC } inline bool gc_heap::update_alloc_info (int gen_number, size_t allocated_size, size_t* etw_allocation_amount) { bool exceeded_p = false; int oh_index = gen_to_oh (gen_number); allocated_since_last_gc[oh_index] += allocated_size; size_t& etw_allocated = etw_allocation_running_amount[oh_index]; etw_allocated += allocated_size; if (etw_allocated > etw_allocation_tick) { *etw_allocation_amount = etw_allocated; exceeded_p = true; etw_allocated = 0; } return exceeded_p; } allocation_state gc_heap::try_allocate_more_space (alloc_context* acontext, size_t size, uint32_t flags, int gen_number) { if (gc_heap::gc_started) { wait_for_gc_done(); return a_state_retry_allocate; } bool loh_p = (gen_number > 0); GCSpinLock* msl = loh_p ? &more_space_lock_uoh : &more_space_lock_soh; #ifdef SYNCHRONIZATION_STATS int64_t msl_acquire_start = GCToOSInterface::QueryPerformanceCounter(); #endif //SYNCHRONIZATION_STATS enter_spin_lock (msl); add_saved_spinlock_info (loh_p, me_acquire, mt_try_alloc); dprintf (SPINLOCK_LOG, ("[%d]Emsl for alloc", heap_number)); #ifdef SYNCHRONIZATION_STATS int64_t msl_acquire = GCToOSInterface::QueryPerformanceCounter() - msl_acquire_start; total_msl_acquire += msl_acquire; num_msl_acquired++; if (msl_acquire > 200) { num_high_msl_acquire++; } else { num_low_msl_acquire++; } #endif //SYNCHRONIZATION_STATS /* // We are commenting this out 'cause we don't see the point - we already // have checked gc_started when we were acquiring the msl - no need to check // again. This complicates the logic in bgc_suspend_EE 'cause that one would // need to release msl which causes all sorts of trouble. 
if (gc_heap::gc_started) { #ifdef SYNCHRONIZATION_STATS good_suspension++; #endif //SYNCHRONIZATION_STATS BOOL fStress = (g_pConfig->GetGCStressLevel() & GCConfig::GCSTRESS_TRANSITION) != 0; if (!fStress) { //Rendez vous early (MP scaling issue) //dprintf (1, ("[%d]waiting for gc", heap_number)); wait_for_gc_done(); #ifdef MULTIPLE_HEAPS return -1; #endif //MULTIPLE_HEAPS } } */ dprintf (3, ("requested to allocate %d bytes on gen%d", size, gen_number)); int align_const = get_alignment_constant (gen_number <= max_generation); if (fgn_maxgen_percent) { check_for_full_gc (gen_number, size); } #ifdef BGC_SERVO_TUNING if ((gen_number != 0) && bgc_tuning::should_trigger_bgc_loh()) { trigger_gc_for_alloc (max_generation, reason_bgc_tuning_loh, msl, loh_p, mt_try_servo_budget); } else #endif //BGC_SERVO_TUNING { bool trigger_on_budget_loh_p = #ifdef BGC_SERVO_TUNING !bgc_tuning::enable_fl_tuning; #else true; #endif //BGC_SERVO_TUNING bool check_budget_p = true; if (gen_number != 0) { check_budget_p = trigger_on_budget_loh_p; } if (check_budget_p && !(new_allocation_allowed (gen_number))) { if (fgn_maxgen_percent && (gen_number == 0)) { // We only check gen0 every so often, so take this opportunity to check again. check_for_full_gc (gen_number, size); } #ifdef BACKGROUND_GC bool recheck_p = wait_for_bgc_high_memory (awr_gen0_alloc, loh_p); #endif //BACKGROUND_GC #ifdef SYNCHRONIZATION_STATS bad_suspension++; #endif //SYNCHRONIZATION_STATS dprintf (2, ("h%d running out of budget on gen%d, gc", heap_number, gen_number)); #ifdef BACKGROUND_GC bool trigger_gc_p = true; if (recheck_p) trigger_gc_p = !(new_allocation_allowed (gen_number)); if (trigger_gc_p) #endif //BACKGROUND_GC { if (!settings.concurrent || (gen_number == 0)) { trigger_gc_for_alloc (0, ((gen_number == 0) ? reason_alloc_soh : reason_alloc_loh), msl, loh_p, mt_try_budget); } } } } allocation_state can_allocate = ((gen_number == 0) ? 
allocate_soh (gen_number, size, acontext, flags, align_const) : allocate_uoh (gen_number, size, acontext, flags, align_const)); return can_allocate; } #ifdef MULTIPLE_HEAPS void gc_heap::balance_heaps (alloc_context* acontext) { if (acontext->alloc_count < 4) { if (acontext->alloc_count == 0) { int home_hp_num = heap_select::select_heap (acontext); acontext->set_home_heap (GCHeap::GetHeap (home_hp_num)); gc_heap* hp = acontext->get_home_heap ()->pGenGCHeap; acontext->set_alloc_heap (acontext->get_home_heap ()); hp->alloc_context_count++; #ifdef HEAP_BALANCE_INSTRUMENTATION uint16_t ideal_proc_no = 0; GCToOSInterface::GetCurrentThreadIdealProc (&ideal_proc_no); uint32_t proc_no = GCToOSInterface::GetCurrentProcessorNumber (); add_to_hb_numa (proc_no, ideal_proc_no, home_hp_num, false, true, false); dprintf (HEAP_BALANCE_TEMP_LOG, ("TEMPafter GC: 1st alloc on p%3d, h%d, ip: %d", proc_no, home_hp_num, ideal_proc_no)); #endif //HEAP_BALANCE_INSTRUMENTATION } } else { BOOL set_home_heap = FALSE; gc_heap* home_hp = NULL; int proc_hp_num = 0; #ifdef HEAP_BALANCE_INSTRUMENTATION bool alloc_count_p = true; bool multiple_procs_p = false; bool set_ideal_p = false; uint32_t proc_no = GCToOSInterface::GetCurrentProcessorNumber (); uint32_t last_proc_no = proc_no; #endif //HEAP_BALANCE_INSTRUMENTATION if (heap_select::can_find_heap_fast ()) { assert (acontext->get_home_heap () != NULL); home_hp = acontext->get_home_heap ()->pGenGCHeap; proc_hp_num = heap_select::select_heap (acontext); if (home_hp != gc_heap::g_heaps[proc_hp_num]) { #ifdef HEAP_BALANCE_INSTRUMENTATION alloc_count_p = false; #endif //HEAP_BALANCE_INSTRUMENTATION set_home_heap = TRUE; } else if ((acontext->alloc_count & 15) == 0) set_home_heap = TRUE; } else { if ((acontext->alloc_count & 3) == 0) set_home_heap = TRUE; } if (set_home_heap) { /* // Since we are balancing up to MAX_SUPPORTED_CPUS, no need for this. if (n_heaps > MAX_SUPPORTED_CPUS) { // on machines with many processors cache affinity is really king, so don't even try // to balance on these. acontext->home_heap = GCHeap::GetHeap( heap_select::select_heap(acontext)); acontext->alloc_heap = acontext->home_heap; } else */ { gc_heap* org_hp = acontext->get_alloc_heap ()->pGenGCHeap; int org_hp_num = org_hp->heap_number; int final_alloc_hp_num = org_hp_num; dynamic_data* dd = org_hp->dynamic_data_of (0); ptrdiff_t org_size = dd_new_allocation (dd); ptrdiff_t total_size = (ptrdiff_t)dd_desired_allocation (dd); #ifdef HEAP_BALANCE_INSTRUMENTATION dprintf (HEAP_BALANCE_TEMP_LOG, ("TEMP[p%3d] ph h%3d, hh: %3d, ah: %3d (%dmb-%dmb), ac: %5d(%s)", proc_no, proc_hp_num, home_hp->heap_number, org_hp_num, (total_size / 1024 / 1024), (org_size / 1024 / 1024), acontext->alloc_count, ((proc_hp_num == home_hp->heap_number) ? 
"AC" : "H"))); #endif //HEAP_BALANCE_INSTRUMENTATION int org_alloc_context_count; int max_alloc_context_count; gc_heap* max_hp; int max_hp_num = 0; ptrdiff_t max_size; size_t local_delta = max (((size_t)org_size >> 6), min_gen0_balance_delta); size_t delta = local_delta; if (((size_t)org_size + 2 * delta) >= (size_t)total_size) { acontext->alloc_count++; return; } #ifdef HEAP_BALANCE_INSTRUMENTATION proc_no = GCToOSInterface::GetCurrentProcessorNumber (); if (proc_no != last_proc_no) { dprintf (HEAP_BALANCE_TEMP_LOG, ("TEMPSP: %d->%d", last_proc_no, proc_no)); multiple_procs_p = true; last_proc_no = proc_no; } int new_home_hp_num = heap_select::proc_no_to_heap_no[proc_no]; #else int new_home_hp_num = heap_select::select_heap(acontext); #endif //HEAP_BALANCE_INSTRUMENTATION gc_heap* new_home_hp = gc_heap::g_heaps[new_home_hp_num]; acontext->set_home_heap (new_home_hp->vm_heap); int start, end, finish; heap_select::get_heap_range_for_heap (new_home_hp_num, &start, &end); finish = start + n_heaps; do { max_hp = org_hp; max_hp_num = org_hp_num; max_size = org_size + delta; org_alloc_context_count = org_hp->alloc_context_count; max_alloc_context_count = org_alloc_context_count; if (org_hp == new_home_hp) max_size = max_size + delta; if (max_alloc_context_count > 1) max_size /= max_alloc_context_count; // check if the new home heap has more space if (org_hp != new_home_hp) { dd = new_home_hp->dynamic_data_of(0); ptrdiff_t size = dd_new_allocation(dd); // favor new home heap over org heap size += delta * 2; int new_home_hp_alloc_context_count = new_home_hp->alloc_context_count; if (new_home_hp_alloc_context_count > 0) size /= (new_home_hp_alloc_context_count + 1); if (size > max_size) { #ifdef HEAP_BALANCE_INSTRUMENTATION dprintf(HEAP_BALANCE_TEMP_LOG, ("TEMPorg h%d(%dmb), m h%d(%dmb)", org_hp_num, (max_size / 1024 / 1024), new_home_hp_num, (size / 1024 / 1024))); #endif //HEAP_BALANCE_INSTRUMENTATION max_hp = new_home_hp; max_size = size; max_hp_num = new_home_hp_num; max_alloc_context_count = new_home_hp_alloc_context_count; } } // consider heaps both inside our local NUMA node, // and outside, but with different thresholds enum { LOCAL_NUMA_NODE, REMOTE_NUMA_NODE }; for (int pass = LOCAL_NUMA_NODE; pass <= REMOTE_NUMA_NODE; pass++) { int count = end - start; int max_tries = min(count, 4); // we will consider max_tries consecutive (in a circular sense) // other heaps from a semi random starting point // alloc_count often increases by multiples of 16 (due to logic at top of routine), // and we want to advance the starting point by 4 between successive calls, // therefore the shift right by 2 bits int heap_num = start + ((acontext->alloc_count >> 2) + new_home_hp_num) % count; #ifdef HEAP_BALANCE_INSTRUMENTATION dprintf(HEAP_BALANCE_TEMP_LOG, ("TEMP starting at h%d (home_heap_num = %d, alloc_count = %d)", heap_num, new_home_hp_num, acontext->alloc_count)); #endif //HEAP_BALANCE_INSTRUMENTATION for (int tries = max_tries; --tries >= 0; heap_num++) { // wrap around if we hit the end of our range if (heap_num >= end) heap_num -= count; // wrap around if we hit the end of the heap numbers if (heap_num >= n_heaps) heap_num -= n_heaps; assert (heap_num < n_heaps); gc_heap* hp = gc_heap::g_heaps[heap_num]; dd = hp->dynamic_data_of(0); ptrdiff_t size = dd_new_allocation(dd); #ifdef HEAP_BALANCE_INSTRUMENTATION dprintf(HEAP_BALANCE_TEMP_LOG, ("TEMP looking at h%d(%dmb)", heap_num, (size / 1024 / 1024))); #endif //HEAP_BALANCE_INSTRUMENTATION // if the size is not bigger than what we already have, // give 
up immediately, as it can't be a winner... // this is a micro-optimization to avoid fetching the // alloc_context_count and possibly dividing by it if (size <= max_size) continue; int hp_alloc_context_count = hp->alloc_context_count; if (hp_alloc_context_count > 0) { size /= (hp_alloc_context_count + 1); } if (size > max_size) { #ifdef HEAP_BALANCE_INSTRUMENTATION dprintf(HEAP_BALANCE_TEMP_LOG, ("TEMPorg h%d(%dmb), m h%d(%dmb)", org_hp_num, (max_size / 1024 / 1024), hp->heap_number, (size / 1024 / 1024))); #endif //HEAP_BALANCE_INSTRUMENTATION max_hp = hp; max_size = size; max_hp_num = max_hp->heap_number; max_alloc_context_count = hp_alloc_context_count; } } if ((max_hp == org_hp) && (end < finish)) { start = end; end = finish; delta = local_delta * 2; // Make it twice as hard to balance to remote nodes on NUMA. } else { // we already found a better heap, or there are no remote NUMA nodes break; } } } while (org_alloc_context_count != org_hp->alloc_context_count || max_alloc_context_count != max_hp->alloc_context_count); #ifdef HEAP_BALANCE_INSTRUMENTATION uint16_t ideal_proc_no_before_set_ideal = 0; GCToOSInterface::GetCurrentThreadIdealProc (&ideal_proc_no_before_set_ideal); #endif //HEAP_BALANCE_INSTRUMENTATION if (max_hp != org_hp) { final_alloc_hp_num = max_hp->heap_number; org_hp->alloc_context_count--; max_hp->alloc_context_count++; acontext->set_alloc_heap (GCHeap::GetHeap (final_alloc_hp_num)); if (!gc_thread_no_affinitize_p) { uint16_t src_proc_no = heap_select::find_proc_no_from_heap_no (org_hp->heap_number); uint16_t dst_proc_no = heap_select::find_proc_no_from_heap_no (max_hp->heap_number); dprintf (HEAP_BALANCE_TEMP_LOG, ("TEMPSW! h%d(p%d)->h%d(p%d)", org_hp_num, src_proc_no, final_alloc_hp_num, dst_proc_no)); #ifdef HEAP_BALANCE_INSTRUMENTATION int current_proc_no_before_set_ideal = GCToOSInterface::GetCurrentProcessorNumber (); if (current_proc_no_before_set_ideal != last_proc_no) { dprintf (HEAP_BALANCE_TEMP_LOG, ("TEMPSPa: %d->%d", last_proc_no, current_proc_no_before_set_ideal)); multiple_procs_p = true; } #endif //HEAP_BALANCE_INSTRUMENTATION if (!GCToOSInterface::SetCurrentThreadIdealAffinity (src_proc_no, dst_proc_no)) { dprintf (HEAP_BALANCE_TEMP_LOG, ("TEMPFailed to set the ideal processor for heap %d %d->%d", org_hp->heap_number, (int)src_proc_no, (int)dst_proc_no)); } #ifdef HEAP_BALANCE_INSTRUMENTATION else { set_ideal_p = true; } #endif //HEAP_BALANCE_INSTRUMENTATION } } #ifdef HEAP_BALANCE_INSTRUMENTATION add_to_hb_numa (proc_no, ideal_proc_no_before_set_ideal, final_alloc_hp_num, multiple_procs_p, alloc_count_p, set_ideal_p); #endif //HEAP_BALANCE_INSTRUMENTATION } } } acontext->alloc_count++; } ptrdiff_t gc_heap::get_balance_heaps_uoh_effective_budget (int generation_num) { if (heap_hard_limit) { const ptrdiff_t free_list_space = generation_free_list_space (generation_of (generation_num)); heap_segment* seg = generation_start_segment (generation_of (generation_num)); assert (heap_segment_next (seg) == nullptr); const ptrdiff_t allocated = heap_segment_allocated (seg) - seg->mem; // We could calculate the actual end_of_seg_space by taking reserved - allocated, // but all heaps have the same reserved memory and this value is only used for comparison. 
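        // So, under a hard limit, a heap's effective UOH budget is approximated as its
        // free-list space minus the bytes already allocated on its single segment; heaps with
        // more free-list space and less allocated space look more attractive to
        // balance_heaps_uoh below.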
return free_list_space - allocated; } else { return dd_new_allocation (dynamic_data_of (generation_num)); } } gc_heap* gc_heap::balance_heaps_uoh (alloc_context* acontext, size_t alloc_size, int generation_num) { const int home_hp_num = heap_select::select_heap(acontext); dprintf (3, ("[h%d] LA: %Id", home_hp_num, alloc_size)); gc_heap* home_hp = GCHeap::GetHeap(home_hp_num)->pGenGCHeap; dynamic_data* dd = home_hp->dynamic_data_of (generation_num); const ptrdiff_t home_hp_size = home_hp->get_balance_heaps_uoh_effective_budget (generation_num); size_t delta = dd_min_size (dd) / 2; int start, end; heap_select::get_heap_range_for_heap(home_hp_num, &start, &end); const int finish = start + n_heaps; try_again: gc_heap* max_hp = home_hp; ptrdiff_t max_size = home_hp_size + delta; dprintf (3, ("home hp: %d, max size: %d", home_hp_num, max_size)); for (int i = start; i < end; i++) { gc_heap* hp = GCHeap::GetHeap(i%n_heaps)->pGenGCHeap; const ptrdiff_t size = hp->get_balance_heaps_uoh_effective_budget (generation_num); dprintf (3, ("hp: %d, size: %d", hp->heap_number, size)); if (size > max_size) { max_hp = hp; max_size = size; dprintf (3, ("max hp: %d, max size: %d", max_hp->heap_number, max_size)); } } if ((max_hp == home_hp) && (end < finish)) { start = end; end = finish; delta = dd_min_size (dd) * 3 / 2; // Make it harder to balance to remote nodes on NUMA. goto try_again; } if (max_hp != home_hp) { dprintf (3, ("uoh: %d(%Id)->%d(%Id)", home_hp->heap_number, dd_new_allocation (home_hp->dynamic_data_of (generation_num)), max_hp->heap_number, dd_new_allocation (max_hp->dynamic_data_of (generation_num)))); } return max_hp; } gc_heap* gc_heap::balance_heaps_uoh_hard_limit_retry (alloc_context* acontext, size_t alloc_size, int generation_num) { assert (heap_hard_limit); const int home_heap = heap_select::select_heap(acontext); dprintf (3, ("[h%d] balance_heaps_loh_hard_limit_retry alloc_size: %d", home_heap, alloc_size)); int start, end; heap_select::get_heap_range_for_heap (home_heap, &start, &end); const int finish = start + n_heaps; gc_heap* max_hp = nullptr; size_t max_end_of_seg_space = alloc_size; // Must be more than this much, or return NULL try_again: { for (int i = start; i < end; i++) { gc_heap* hp = GCHeap::GetHeap (i%n_heaps)->pGenGCHeap; heap_segment* seg = generation_start_segment (hp->generation_of (generation_num)); // With a hard limit, there is only one segment. assert (heap_segment_next (seg) == nullptr); const size_t end_of_seg_space = heap_segment_reserved (seg) - heap_segment_allocated (seg); if (end_of_seg_space >= max_end_of_seg_space) { dprintf (3, ("Switching heaps in hard_limit_retry! To: [h%d], New end_of_seg_space: %d", hp->heap_number, end_of_seg_space)); max_end_of_seg_space = end_of_seg_space; max_hp = hp; } } } // Only switch to a remote NUMA node if we didn't find space on this one. 
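    // (max_hp is still nullptr at this point if no heap in the local range had at least
    // alloc_size bytes of end-of-segment space left.)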
if ((max_hp == nullptr) && (end < finish)) { start = end; end = finish; goto try_again; } return max_hp; } #endif //MULTIPLE_HEAPS BOOL gc_heap::allocate_more_space(alloc_context* acontext, size_t size, uint32_t flags, int alloc_generation_number) { allocation_state status = a_state_start; do { #ifdef MULTIPLE_HEAPS if (alloc_generation_number == 0) { balance_heaps (acontext); status = acontext->get_alloc_heap()->pGenGCHeap->try_allocate_more_space (acontext, size, flags, alloc_generation_number); } else { gc_heap* alloc_heap; if (heap_hard_limit && (status == a_state_retry_allocate)) { alloc_heap = balance_heaps_uoh_hard_limit_retry (acontext, size, alloc_generation_number); if (alloc_heap == nullptr) { return false; } } else { alloc_heap = balance_heaps_uoh (acontext, size, alloc_generation_number); } status = alloc_heap->try_allocate_more_space (acontext, size, flags, alloc_generation_number); if (status == a_state_retry_allocate) { dprintf (3, ("UOH h%d alloc retry!", alloc_heap->heap_number)); } } #else status = try_allocate_more_space (acontext, size, flags, alloc_generation_number); #endif //MULTIPLE_HEAPS } while (status == a_state_retry_allocate); return (status == a_state_can_allocate); } inline CObjectHeader* gc_heap::allocate (size_t jsize, alloc_context* acontext, uint32_t flags) { size_t size = Align (jsize); assert (size >= Align (min_obj_size)); { retry: uint8_t* result = acontext->alloc_ptr; acontext->alloc_ptr+=size; if (acontext->alloc_ptr <= acontext->alloc_limit) { CObjectHeader* obj = (CObjectHeader*)result; assert (obj != 0); return obj; } else { acontext->alloc_ptr -= size; #ifdef _MSC_VER #pragma inline_depth(0) #endif //_MSC_VER if (! allocate_more_space (acontext, size, flags, 0)) return 0; #ifdef _MSC_VER #pragma inline_depth(20) #endif //_MSC_VER goto retry; } } } void gc_heap::leave_allocation_segment (generation* gen) { adjust_limit (0, 0, gen); } void gc_heap::init_free_and_plug() { #ifdef FREE_USAGE_STATS int i = (settings.concurrent ? max_generation : 0); for (; i <= settings.condemned_generation; i++) { generation* gen = generation_of (i); #ifdef DOUBLY_LINKED_FL print_free_and_plug ("BGC"); #else memset (gen->gen_free_spaces, 0, sizeof (gen->gen_free_spaces)); #endif //DOUBLY_LINKED_FL memset (gen->gen_plugs, 0, sizeof (gen->gen_plugs)); memset (gen->gen_current_pinned_free_spaces, 0, sizeof (gen->gen_current_pinned_free_spaces)); } if (settings.condemned_generation != max_generation) { for (int i = (settings.condemned_generation + 1); i <= max_generation; i++) { generation* gen = generation_of (i); memset (gen->gen_plugs, 0, sizeof (gen->gen_plugs)); } } #endif //FREE_USAGE_STATS } void gc_heap::print_free_and_plug (const char* msg) { #ifdef FREE_USAGE_STATS int older_gen = ((settings.condemned_generation == max_generation) ? max_generation : (settings.condemned_generation + 1)); for (int i = 0; i <= older_gen; i++) { generation* gen = generation_of (i); for (int j = 0; j < NUM_GEN_POWER2; j++) { if ((gen->gen_free_spaces[j] != 0) || (gen->gen_plugs[j] != 0)) { dprintf (2, ("[%s][h%d][%s#%d]gen%d: 2^%d: F: %Id, P: %Id", msg, heap_number, (settings.concurrent ? 
"BGC" : "GC"), settings.gc_index, i, (j + 9), gen->gen_free_spaces[j], gen->gen_plugs[j])); } } } #else UNREFERENCED_PARAMETER(msg); #endif //FREE_USAGE_STATS } // replace with allocator::first_suitable_bucket int gc_heap::find_bucket (size_t size) { size_t sz = BASE_GEN_SIZE; int i = 0; for (; i < (NUM_GEN_POWER2 - 1); i++) { if (size < sz) { break; } sz = sz * 2; } return i; } void gc_heap::add_gen_plug (int gen_number, size_t plug_size) { #ifdef FREE_USAGE_STATS dprintf (3, ("adding plug size %Id to gen%d", plug_size, gen_number)); generation* gen = generation_of (gen_number); size_t sz = BASE_GEN_SIZE; int i = find_bucket (plug_size); (gen->gen_plugs[i])++; #else UNREFERENCED_PARAMETER(gen_number); UNREFERENCED_PARAMETER(plug_size); #endif //FREE_USAGE_STATS } void gc_heap::add_item_to_current_pinned_free (int gen_number, size_t free_size) { #ifdef FREE_USAGE_STATS generation* gen = generation_of (gen_number); size_t sz = BASE_GEN_SIZE; int i = find_bucket (free_size); (gen->gen_current_pinned_free_spaces[i])++; generation_pinned_free_obj_space (gen) += free_size; dprintf (3, ("left pin free %Id(2^%d) to gen%d, total %Id bytes (%Id)", free_size, (i + 10), gen_number, generation_pinned_free_obj_space (gen), gen->gen_current_pinned_free_spaces[i])); #else UNREFERENCED_PARAMETER(gen_number); UNREFERENCED_PARAMETER(free_size); #endif //FREE_USAGE_STATS } // This is only for items large enough to be on the FL // Ideally we should keep track of smaller ones too but for now // it's easier to make the accounting right void gc_heap::add_gen_free (int gen_number, size_t free_size) { #ifdef FREE_USAGE_STATS dprintf (3, ("adding free size %Id to gen%d", free_size, gen_number)); if (free_size < min_free_list) return; generation* gen = generation_of (gen_number); size_t sz = BASE_GEN_SIZE; int i = find_bucket (free_size); (gen->gen_free_spaces[i])++; if (gen_number == max_generation) { dprintf (3, ("Mb b%d: f+ %Id (%Id)", i, free_size, gen->gen_free_spaces[i])); } #else UNREFERENCED_PARAMETER(gen_number); UNREFERENCED_PARAMETER(free_size); #endif //FREE_USAGE_STATS } void gc_heap::remove_gen_free (int gen_number, size_t free_size) { #ifdef FREE_USAGE_STATS dprintf (3, ("removing free %Id from gen%d", free_size, gen_number)); if (free_size < min_free_list) return; generation* gen = generation_of (gen_number); size_t sz = BASE_GEN_SIZE; int i = find_bucket (free_size); (gen->gen_free_spaces[i])--; if (gen_number == max_generation) { dprintf (3, ("Mb b%d: f- %Id (%Id)", i, free_size, gen->gen_free_spaces[i])); } #else UNREFERENCED_PARAMETER(gen_number); UNREFERENCED_PARAMETER(free_size); #endif //FREE_USAGE_STATS } #ifdef DOUBLY_LINKED_FL // This is only called on free spaces. BOOL gc_heap::should_set_bgc_mark_bit (uint8_t* o) { if (!current_sweep_seg) { assert (current_bgc_state == bgc_not_in_process); return FALSE; } // This is cheaper so I am doing this comparision first before having to get the seg for o. if (in_range_for_segment (o, current_sweep_seg)) { // The current sweep seg could have free spaces beyond its background_allocated so we need // to check for that. 
if ((o >= current_sweep_pos) && (o < heap_segment_background_allocated (current_sweep_seg))) { #ifndef USE_REGIONS if (current_sweep_seg == saved_sweep_ephemeral_seg) { return (o < saved_sweep_ephemeral_start); } else #endif //!USE_REGIONS { return TRUE; } } else return FALSE; } else { // We can have segments outside the BGC range that were allocated during mark - and we // wouldn't have committed the mark array for them and their background_allocated would be // non-zero. Don't set mark bits for those. // The ones allocated during BGC sweep would have their background_allocated as 0. if ((o >= background_saved_lowest_address) && (o < background_saved_highest_address)) { heap_segment* seg = seg_mapping_table_segment_of (o); // if bgc_allocated is 0 it means it was allocated during bgc sweep, // and everything on it should be considered live. uint8_t* background_allocated = heap_segment_background_allocated (seg); if (background_allocated == 0) return FALSE; // During BGC sweep gen1 GCs could add some free spaces in gen2. // If we use those, we should not set the mark bits on them. // They could either be a newly allocated seg which is covered by the // above case; or they are on a seg that's seen but beyond what BGC mark // saw. else if (o >= background_allocated) return FALSE; else return (!heap_segment_swept_p (seg)); } else return FALSE; } } #endif //DOUBLY_LINKED_FL uint8_t* gc_heap::allocate_in_older_generation (generation* gen, size_t size, int from_gen_number, uint8_t* old_loc REQD_ALIGN_AND_OFFSET_DCL) { size = Align (size); assert (size >= Align (min_obj_size)); assert (from_gen_number < max_generation); assert (from_gen_number >= 0); assert (generation_of (from_gen_number + 1) == gen); #ifdef DOUBLY_LINKED_FL BOOL consider_bgc_mark_p = FALSE; BOOL check_current_sweep_p = FALSE; BOOL check_saved_sweep_p = FALSE; BOOL try_added_list_p = (gen->gen_num == max_generation); BOOL record_free_list_allocated_p = ((gen->gen_num == max_generation) && (current_c_gc_state == c_gc_state_planning)); #endif //DOUBLY_LINKED_FL allocator* gen_allocator = generation_allocator (gen); BOOL discard_p = gen_allocator->discard_if_no_fit_p (); #ifdef SHORT_PLUGS int pad_in_front = ((old_loc != 0) && ((from_gen_number+1) != max_generation)) ? USE_PADDING_FRONT : 0; #else //SHORT_PLUGS int pad_in_front = 0; #endif //SHORT_PLUGS size_t real_size = size + Align (min_obj_size); if (pad_in_front) real_size += Align (min_obj_size); #ifdef RESPECT_LARGE_ALIGNMENT real_size += switch_alignment_size (pad_in_front); #endif //RESPECT_LARGE_ALIGNMENT if (! 
(size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, generation_allocation_pointer (gen), generation_allocation_limit (gen), old_loc, USE_PADDING_TAIL | pad_in_front))) { for (unsigned int a_l_idx = gen_allocator->first_suitable_bucket(real_size * 2); a_l_idx < gen_allocator->number_of_buckets(); a_l_idx++) { uint8_t* free_list = 0; uint8_t* prev_free_item = 0; BOOL use_undo_p = !discard_p; #ifdef DOUBLY_LINKED_FL if (a_l_idx == 0) { use_undo_p = FALSE; } if (try_added_list_p) { free_list = gen_allocator->added_alloc_list_head_of (a_l_idx); while (free_list != 0) { dprintf (3, ("considering free list in added list%Ix", (size_t)free_list)); size_t free_list_size = unused_array_size (free_list); if (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, free_list, (free_list + free_list_size), old_loc, USE_PADDING_TAIL | pad_in_front)) { dprintf (4, ("F:%Ix-%Id", (size_t)free_list, free_list_size)); gen_allocator->unlink_item_no_undo_added (a_l_idx, free_list, prev_free_item); generation_free_list_space (gen) -= free_list_size; assert ((ptrdiff_t)generation_free_list_space (gen) >= 0); remove_gen_free (gen->gen_num, free_list_size); if (record_free_list_allocated_p) { generation_set_bgc_mark_bit_p (gen) = should_set_bgc_mark_bit (free_list); dprintf (3333, ("SFA: %Ix->%Ix(%d)", free_list, (free_list + free_list_size), (generation_set_bgc_mark_bit_p (gen) ? 1 : 0))); } adjust_limit (free_list, free_list_size, gen); generation_allocate_end_seg_p (gen) = FALSE; goto finished; } // We do first fit on bucket 0 because we are not guaranteed to find a fit there. else if (a_l_idx == 0) { dprintf (3, ("couldn't use this free area, discarding")); generation_free_obj_space (gen) += free_list_size; gen_allocator->unlink_item_no_undo_added (a_l_idx, free_list, prev_free_item); generation_free_list_space (gen) -= free_list_size; assert ((ptrdiff_t)generation_free_list_space (gen) >= 0); remove_gen_free (gen->gen_num, free_list_size); } else { prev_free_item = free_list; } free_list = free_list_slot (free_list); } } #endif //DOUBLY_LINKED_FL free_list = gen_allocator->alloc_list_head_of (a_l_idx); prev_free_item = 0; while (free_list != 0) { dprintf (3, ("considering free list %Ix", (size_t)free_list)); size_t free_list_size = unused_array_size (free_list); if (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, free_list, (free_list + free_list_size), old_loc, USE_PADDING_TAIL | pad_in_front)) { dprintf (4, ("F:%Ix-%Id", (size_t)free_list, free_list_size)); gen_allocator->unlink_item (a_l_idx, free_list, prev_free_item, use_undo_p); generation_free_list_space (gen) -= free_list_size; assert ((ptrdiff_t)generation_free_list_space (gen) >= 0); remove_gen_free (gen->gen_num, free_list_size); #ifdef DOUBLY_LINKED_FL if (!discard_p && !use_undo_p) { gen2_removed_no_undo += free_list_size; dprintf (3, ("h%d: remove with no undo %Id = %Id", heap_number, free_list_size, gen2_removed_no_undo)); } if (record_free_list_allocated_p) { generation_set_bgc_mark_bit_p (gen) = should_set_bgc_mark_bit (free_list); dprintf (3333, ("SF: %Ix(%d)", free_list, (generation_set_bgc_mark_bit_p (gen) ? 1 : 0))); } #endif //DOUBLY_LINKED_FL adjust_limit (free_list, free_list_size, gen); generation_allocate_end_seg_p (gen) = FALSE; goto finished; } // We do first fit on bucket 0 because we are not guaranteed to find a fit there. 
else if (discard_p || (a_l_idx == 0)) { dprintf (3, ("couldn't use this free area, discarding")); generation_free_obj_space (gen) += free_list_size; gen_allocator->unlink_item (a_l_idx, free_list, prev_free_item, FALSE); generation_free_list_space (gen) -= free_list_size; assert ((ptrdiff_t)generation_free_list_space (gen) >= 0); remove_gen_free (gen->gen_num, free_list_size); #ifdef DOUBLY_LINKED_FL if (!discard_p) { gen2_removed_no_undo += free_list_size; dprintf (3, ("h%d: b0 remove with no undo %Id = %Id", heap_number, free_list_size, gen2_removed_no_undo)); } #endif //DOUBLY_LINKED_FL } else { prev_free_item = free_list; } free_list = free_list_slot (free_list); } } #ifdef USE_REGIONS // We don't want to always go back to the first region since there might be many. heap_segment* seg = generation_allocation_segment (gen); dprintf (3, ("end of seg, starting from alloc seg %Ix", heap_segment_mem (seg))); assert (seg != ephemeral_heap_segment); while (true) #else //go back to the beginning of the segment list heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); if (seg != generation_allocation_segment (gen)) { leave_allocation_segment (gen); generation_allocation_segment (gen) = seg; } while (seg != ephemeral_heap_segment) #endif //USE_REGIONS { if (size_fit_p(size REQD_ALIGN_AND_OFFSET_ARG, heap_segment_plan_allocated (seg), heap_segment_committed (seg), old_loc, USE_PADDING_TAIL | pad_in_front)) { adjust_limit (heap_segment_plan_allocated (seg), (heap_segment_committed (seg) - heap_segment_plan_allocated (seg)), gen); generation_allocate_end_seg_p (gen) = TRUE; heap_segment_plan_allocated (seg) = heap_segment_committed (seg); dprintf (3, ("seg %Ix is used for end of seg alloc", heap_segment_mem (seg))); goto finished; } else { if (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, heap_segment_plan_allocated (seg), heap_segment_reserved (seg), old_loc, USE_PADDING_TAIL | pad_in_front) && grow_heap_segment (seg, heap_segment_plan_allocated (seg), old_loc, size, pad_in_front REQD_ALIGN_AND_OFFSET_ARG)) { adjust_limit (heap_segment_plan_allocated (seg), (heap_segment_committed (seg) - heap_segment_plan_allocated (seg)), gen); generation_allocate_end_seg_p (gen) = TRUE; heap_segment_plan_allocated (seg) = heap_segment_committed (seg); dprintf (3, ("seg %Ix is used for end of seg alloc after grow, %Ix", heap_segment_mem (seg), heap_segment_committed (seg))); goto finished; } else { leave_allocation_segment (gen); heap_segment* next_seg = heap_segment_next_rw (seg); #ifdef USE_REGIONS assert (next_seg != ephemeral_heap_segment); #endif //USE_REGIONS if (next_seg) { generation_allocation_segment (gen) = next_seg; generation_allocation_pointer (gen) = heap_segment_mem (next_seg); generation_allocation_limit (gen) = generation_allocation_pointer (gen); dprintf (3, ("alloc region advanced to %Ix", heap_segment_mem (next_seg))); } else { size = 0; goto finished; } } } seg = generation_allocation_segment (gen); } //No need to fix the last region. 
Will be done later size = 0; goto finished; } finished: if (0 == size) { return 0; } else { uint8_t* result = generation_allocation_pointer (gen); size_t pad = 0; #ifdef SHORT_PLUGS if ((pad_in_front & USE_PADDING_FRONT) && (((generation_allocation_pointer (gen) - generation_allocation_context_start_region (gen))==0) || ((generation_allocation_pointer (gen) - generation_allocation_context_start_region (gen))>=DESIRED_PLUG_LENGTH))) { pad = Align (min_obj_size); set_plug_padded (old_loc); } #endif //SHORT_PLUGS #ifdef FEATURE_STRUCTALIGN _ASSERTE(!old_loc || alignmentOffset != 0); _ASSERTE(old_loc || requiredAlignment == DATA_ALIGNMENT); if (old_loc != 0) { size_t pad1 = ComputeStructAlignPad(result+pad, requiredAlignment, alignmentOffset); set_node_aligninfo (old_loc, requiredAlignment, pad1); pad += pad1; } #else // FEATURE_STRUCTALIGN if (!((old_loc == 0) || same_large_alignment_p (old_loc, result+pad))) { pad += switch_alignment_size (pad != 0); set_node_realigned (old_loc); dprintf (3, ("Allocation realignment old_loc: %Ix, new_loc:%Ix", (size_t)old_loc, (size_t)(result+pad))); assert (same_large_alignment_p (result + pad, old_loc)); } #endif // FEATURE_STRUCTALIGN dprintf (3, ("Allocate %Id bytes", size)); if ((old_loc == 0) || (pad != 0)) { //allocating a non plug or a gap, so reset the start region generation_allocation_context_start_region (gen) = generation_allocation_pointer (gen); } generation_allocation_pointer (gen) += size + pad; assert (generation_allocation_pointer (gen) <= generation_allocation_limit (gen)); generation_free_obj_space (gen) += pad; if (generation_allocate_end_seg_p (gen)) { generation_end_seg_allocated (gen) += size; } else { #ifdef DOUBLY_LINKED_FL if (generation_set_bgc_mark_bit_p (gen)) { dprintf (2, ("IOM: %Ix(->%Ix(%Id) (%Ix-%Ix)", old_loc, result, pad, (size_t)(&mark_array [mark_word_of (result)]), (size_t)(mark_array [mark_word_of (result)]))); set_plug_bgc_mark_bit (old_loc); } generation_last_free_list_allocated (gen) = old_loc; #endif //DOUBLY_LINKED_FL generation_free_list_allocated (gen) += size; } generation_allocation_size (gen) += size; dprintf (3, ("aio: ptr: %Ix, limit: %Ix, sr: %Ix", generation_allocation_pointer (gen), generation_allocation_limit (gen), generation_allocation_context_start_region (gen))); return (result + pad); } } #ifndef USE_REGIONS void gc_heap::repair_allocation_in_expanded_heap (generation* consing_gen) { //make sure that every generation has a planned allocation start int gen_number = max_generation - 1; while (gen_number>= 0) { generation* gen = generation_of (gen_number); if (0 == generation_plan_allocation_start (gen)) { realloc_plan_generation_start (gen, consing_gen); assert (generation_plan_allocation_start (gen)); } gen_number--; } // now we know the planned allocation size size_t size = (generation_allocation_limit (consing_gen) - generation_allocation_pointer (consing_gen)); heap_segment* seg = generation_allocation_segment (consing_gen); if (generation_allocation_limit (consing_gen) == heap_segment_plan_allocated (seg)) { if (size != 0) { heap_segment_plan_allocated (seg) = generation_allocation_pointer (consing_gen); } } else { assert (settings.condemned_generation == max_generation); uint8_t* first_address = generation_allocation_limit (consing_gen); //look through the pinned plugs for relevant ones. //Look for the right pinned plug to start from. 
size_t mi = 0; mark* m = 0; while (mi != mark_stack_tos) { m = pinned_plug_of (mi); if ((pinned_plug (m) == first_address)) break; else mi++; } assert (mi != mark_stack_tos); pinned_len (m) = size; } } //tododefrag optimize for new segment (plan_allocated == mem) uint8_t* gc_heap::allocate_in_expanded_heap (generation* gen, size_t size, BOOL& adjacentp, uint8_t* old_loc, #ifdef SHORT_PLUGS BOOL set_padding_on_saved_p, mark* pinned_plug_entry, #endif //SHORT_PLUGS BOOL consider_bestfit, int active_new_gen_number REQD_ALIGN_AND_OFFSET_DCL) { dprintf (3, ("aie: P: %Ix, size: %Ix", old_loc, size)); size = Align (size); assert (size >= Align (min_obj_size)); #ifdef SHORT_PLUGS int pad_in_front = ((old_loc != 0) && (active_new_gen_number != max_generation)) ? USE_PADDING_FRONT : 0; #else //SHORT_PLUGS int pad_in_front = 0; #endif //SHORT_PLUGS if (consider_bestfit && use_bestfit) { assert (bestfit_seg); dprintf (SEG_REUSE_LOG_1, ("reallocating 0x%Ix in expanded heap, size: %Id", old_loc, size)); return bestfit_seg->fit (old_loc, size REQD_ALIGN_AND_OFFSET_ARG); } heap_segment* seg = generation_allocation_segment (gen); if (! (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, generation_allocation_pointer (gen), generation_allocation_limit (gen), old_loc, ((generation_allocation_limit (gen) != heap_segment_plan_allocated (seg))? USE_PADDING_TAIL : 0) | pad_in_front))) { dprintf (3, ("aie: can't fit: ptr: %Ix, limit: %Ix", generation_allocation_pointer (gen), generation_allocation_limit (gen))); adjacentp = FALSE; uint8_t* first_address = (generation_allocation_limit (gen) ? generation_allocation_limit (gen) : heap_segment_mem (seg)); assert (in_range_for_segment (first_address, seg)); uint8_t* end_address = heap_segment_reserved (seg); dprintf (3, ("aie: first_addr: %Ix, gen alloc limit: %Ix, end_address: %Ix", first_address, generation_allocation_limit (gen), end_address)); size_t mi = 0; mark* m = 0; if (heap_segment_allocated (seg) != heap_segment_mem (seg)) { assert (settings.condemned_generation == max_generation); //look through the pinned plugs for relevant ones. //Look for the right pinned plug to start from. while (mi != mark_stack_tos) { m = pinned_plug_of (mi); if ((pinned_plug (m) >= first_address) && (pinned_plug (m) < end_address)) { dprintf (3, ("aie: found pin: %Ix", pinned_plug (m))); break; } else mi++; } if (mi != mark_stack_tos) { //fix old free list. 
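                    // The hole between the current allocation pointer and the allocation limit
                    // is handed back to the pinned plug that starts at the limit, by recording
                    // the hole size as that plug's pinned_len.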
size_t hsize = (generation_allocation_limit (gen) - generation_allocation_pointer (gen)); { dprintf(3,("gc filling up hole")); ptrdiff_t mi1 = (ptrdiff_t)mi; while ((mi1 >= 0) && (pinned_plug (pinned_plug_of(mi1)) != generation_allocation_limit (gen))) { dprintf (3, ("aie: checking pin %Ix", pinned_plug (pinned_plug_of(mi1)))); mi1--; } if (mi1 >= 0) { size_t saved_pinned_len = pinned_len (pinned_plug_of(mi1)); pinned_len (pinned_plug_of(mi1)) = hsize; dprintf (3, ("changing %Ix len %Ix->%Ix", pinned_plug (pinned_plug_of(mi1)), saved_pinned_len, pinned_len (pinned_plug_of(mi1)))); } } } } else { assert (generation_allocation_limit (gen) == generation_allocation_pointer (gen)); mi = mark_stack_tos; } while ((mi != mark_stack_tos) && in_range_for_segment (pinned_plug (m), seg)) { size_t len = pinned_len (m); uint8_t* free_list = (pinned_plug (m) - len); dprintf (3, ("aie: testing free item: %Ix->%Ix(%Ix)", free_list, (free_list + len), len)); if (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, free_list, (free_list + len), old_loc, USE_PADDING_TAIL | pad_in_front)) { dprintf (3, ("aie: Found adequate unused area: %Ix, size: %Id", (size_t)free_list, len)); { generation_allocation_pointer (gen) = free_list; generation_allocation_context_start_region (gen) = generation_allocation_pointer (gen); generation_allocation_limit (gen) = (free_list + len); } goto allocate_in_free; } mi++; m = pinned_plug_of (mi); } //switch to the end of the segment. generation_allocation_pointer (gen) = heap_segment_plan_allocated (seg); generation_allocation_context_start_region (gen) = generation_allocation_pointer (gen); heap_segment_plan_allocated (seg) = heap_segment_committed (seg); generation_allocation_limit (gen) = heap_segment_plan_allocated (seg); dprintf (3, ("aie: switching to end of seg: %Ix->%Ix(%Ix)", generation_allocation_pointer (gen), generation_allocation_limit (gen), (generation_allocation_limit (gen) - generation_allocation_pointer (gen)))); if (!size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, generation_allocation_pointer (gen), generation_allocation_limit (gen), old_loc, USE_PADDING_TAIL | pad_in_front)) { dprintf (3, ("aie: ptr: %Ix, limit: %Ix, can't alloc", generation_allocation_pointer (gen), generation_allocation_limit (gen))); assert (!"Can't allocate if no free space"); return 0; } } else { adjacentp = TRUE; } allocate_in_free: { uint8_t* result = generation_allocation_pointer (gen); size_t pad = 0; #ifdef SHORT_PLUGS if ((pad_in_front & USE_PADDING_FRONT) && (((generation_allocation_pointer (gen) - generation_allocation_context_start_region (gen))==0) || ((generation_allocation_pointer (gen) - generation_allocation_context_start_region (gen))>=DESIRED_PLUG_LENGTH))) { pad = Align (min_obj_size); set_padding_in_expand (old_loc, set_padding_on_saved_p, pinned_plug_entry); } #endif //SHORT_PLUGS #ifdef FEATURE_STRUCTALIGN _ASSERTE(!old_loc || alignmentOffset != 0); _ASSERTE(old_loc || requiredAlignment == DATA_ALIGNMENT); if (old_loc != 0) { size_t pad1 = ComputeStructAlignPad(result+pad, requiredAlignment, alignmentOffset); set_node_aligninfo (old_loc, requiredAlignment, pad1); pad += pad1; adjacentp = FALSE; } #else // FEATURE_STRUCTALIGN if (!((old_loc == 0) || same_large_alignment_p (old_loc, result+pad))) { pad += switch_alignment_size (pad != 0); set_node_realigned (old_loc); dprintf (3, ("Allocation realignment old_loc: %Ix, new_loc:%Ix", (size_t)old_loc, (size_t)(result+pad))); assert (same_large_alignment_p (result + pad, old_loc)); adjacentp = FALSE; } #endif // FEATURE_STRUCTALIGN if 
((old_loc == 0) || (pad != 0)) { //allocating a non plug or a gap, so reset the start region generation_allocation_context_start_region (gen) = generation_allocation_pointer (gen); } generation_allocation_pointer (gen) += size + pad; assert (generation_allocation_pointer (gen) <= generation_allocation_limit (gen)); dprintf (3, ("Allocated in expanded heap %Ix:%Id", (size_t)(result+pad), size)); dprintf (3, ("aie: ptr: %Ix, limit: %Ix, sr: %Ix", generation_allocation_pointer (gen), generation_allocation_limit (gen), generation_allocation_context_start_region (gen))); return result + pad; } } generation* gc_heap::ensure_ephemeral_heap_segment (generation* consing_gen) { heap_segment* seg = generation_allocation_segment (consing_gen); if (seg != ephemeral_heap_segment) { assert (generation_allocation_pointer (consing_gen)>= heap_segment_mem (seg)); assert (generation_allocation_pointer (consing_gen)<= heap_segment_committed (seg)); //fix the allocated size of the segment. heap_segment_plan_allocated (seg) = generation_allocation_pointer (consing_gen); generation* new_consing_gen = generation_of (max_generation - 1); generation_allocation_pointer (new_consing_gen) = heap_segment_mem (ephemeral_heap_segment); generation_allocation_limit (new_consing_gen) = generation_allocation_pointer (new_consing_gen); generation_allocation_context_start_region (new_consing_gen) = generation_allocation_pointer (new_consing_gen); generation_allocation_segment (new_consing_gen) = ephemeral_heap_segment; return new_consing_gen; } else return consing_gen; } #endif //!USE_REGIONS inline void gc_heap::init_alloc_info (generation* gen, heap_segment* seg) { generation_allocation_segment (gen) = seg; generation_allocation_pointer (gen) = heap_segment_mem (seg); generation_allocation_limit (gen) = generation_allocation_pointer (gen); generation_allocation_context_start_region (gen) = generation_allocation_pointer (gen); } inline heap_segment* gc_heap::get_next_alloc_seg (generation* gen) { #ifdef USE_REGIONS heap_segment* saved_region = generation_allocation_segment (gen); int gen_num = heap_segment_gen_num (saved_region); heap_segment* region = saved_region; while (1) { region = heap_segment_non_sip (region); if (region) { break; } else { if (gen_num > 0) { gen_num--; region = generation_start_segment (generation_of (gen_num)); dprintf (REGIONS_LOG, ("h%d next alloc region: switching to next gen%d start %Ix(%Ix)", heap_number, heap_segment_gen_num (region), (size_t)region, heap_segment_mem (region))); } else { assert (!"ran out regions when getting the next alloc seg!"); } } } if (region != saved_region) { dprintf (REGIONS_LOG, ("init allocate region for gen%d to %Ix(%d)", gen->gen_num, heap_segment_mem (region), heap_segment_gen_num (region))); init_alloc_info (gen, region); } return region; #else return generation_allocation_segment (gen); #endif //USE_REGIONS } uint8_t* gc_heap::allocate_in_condemned_generations (generation* gen, size_t size, int from_gen_number, #ifdef SHORT_PLUGS BOOL* convert_to_pinned_p, uint8_t* next_pinned_plug, heap_segment* current_seg, #endif //SHORT_PLUGS uint8_t* old_loc REQD_ALIGN_AND_OFFSET_DCL) { #ifndef USE_REGIONS // Make sure that the youngest generation gap hasn't been allocated if (settings.promotion) { assert (generation_plan_allocation_start (youngest_generation) == 0); } #endif //!USE_REGIONS size = Align (size); assert (size >= Align (min_obj_size)); int to_gen_number = from_gen_number; if (from_gen_number != (int)max_generation) { to_gen_number = from_gen_number + 
(settings.promotion ? 1 : 0); } dprintf (3, ("aic gen%d: s: %Id, ac: %Ix-%Ix", gen->gen_num, size, generation_allocation_pointer (gen), generation_allocation_limit (gen))); #ifdef SHORT_PLUGS int pad_in_front = ((old_loc != 0) && (to_gen_number != max_generation)) ? USE_PADDING_FRONT : 0; #else //SHORT_PLUGS int pad_in_front = 0; #endif //SHORT_PLUGS if ((from_gen_number != -1) && (from_gen_number != (int)max_generation) && settings.promotion) { generation_condemned_allocated (generation_of (from_gen_number + (settings.promotion ? 1 : 0))) += size; generation_allocation_size (generation_of (from_gen_number + (settings.promotion ? 1 : 0))) += size; } retry: { heap_segment* seg = get_next_alloc_seg (gen); if (! (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, generation_allocation_pointer (gen), generation_allocation_limit (gen), old_loc, ((generation_allocation_limit (gen) != heap_segment_plan_allocated (seg))?USE_PADDING_TAIL:0)|pad_in_front))) { if ((! (pinned_plug_que_empty_p()) && (generation_allocation_limit (gen) == pinned_plug (oldest_pin())))) { size_t entry = deque_pinned_plug(); mark* pinned_plug_entry = pinned_plug_of (entry); size_t len = pinned_len (pinned_plug_entry); uint8_t* plug = pinned_plug (pinned_plug_entry); set_new_pin_info (pinned_plug_entry, generation_allocation_pointer (gen)); #ifdef USE_REGIONS if (to_gen_number == 0) { update_planned_gen0_free_space (pinned_len (pinned_plug_entry), plug); dprintf (REGIONS_LOG, ("aic: not promotion, gen0 added free space %Id at %Ix", pinned_len (pinned_plug_entry), plug)); } #endif //USE_REGIONS #ifdef FREE_USAGE_STATS generation_allocated_in_pinned_free (gen) += generation_allocated_since_last_pin (gen); dprintf (3, ("allocated %Id so far within pin %Ix, total->%Id", generation_allocated_since_last_pin (gen), plug, generation_allocated_in_pinned_free (gen))); generation_allocated_since_last_pin (gen) = 0; add_item_to_current_pinned_free (gen->gen_num, pinned_len (pinned_plug_of (entry))); #endif //FREE_USAGE_STATS dprintf (3, ("mark stack bos: %Id, tos: %Id, aic: p %Ix len: %Ix->%Ix", mark_stack_bos, mark_stack_tos, plug, len, pinned_len (pinned_plug_of (entry)))); assert(mark_stack_array[entry].len == 0 || mark_stack_array[entry].len >= Align(min_obj_size)); generation_allocation_pointer (gen) = plug + len; generation_allocation_context_start_region (gen) = generation_allocation_pointer (gen); generation_allocation_limit (gen) = heap_segment_plan_allocated (seg); set_allocator_next_pin (gen); //Add the size of the pinned plug to the right pinned allocations //find out which gen this pinned plug came from int frgn = object_gennum (plug); if ((frgn != (int)max_generation) && settings.promotion) { generation_pinned_allocation_sweep_size (generation_of (frgn + 1)) += len; #ifdef USE_REGIONS // With regions it's a bit more complicated since we only set the plan_gen_num // of a region after we've planned it. This means if the pinning plug is in the // the same seg we are planning, we haven't set its plan_gen_num yet. So we // need to check for that first. int togn = (in_range_for_segment (plug, seg) ? 
to_gen_number : object_gennum_plan (plug)); #else int togn = object_gennum_plan (plug); #endif //USE_REGIONS if (frgn < togn) { generation_pinned_allocation_compact_size (generation_of (togn)) += len; } } goto retry; } if (generation_allocation_limit (gen) != heap_segment_plan_allocated (seg)) { generation_allocation_limit (gen) = heap_segment_plan_allocated (seg); dprintf (3, ("changed limit to plan alloc: %Ix", generation_allocation_limit (gen))); } else { if (heap_segment_plan_allocated (seg) != heap_segment_committed (seg)) { heap_segment_plan_allocated (seg) = heap_segment_committed (seg); generation_allocation_limit (gen) = heap_segment_plan_allocated (seg); dprintf (3, ("changed limit to commit: %Ix", generation_allocation_limit (gen))); } else { #if !defined(RESPECT_LARGE_ALIGNMENT) && !defined(USE_REGIONS) assert (gen != youngest_generation); #endif //!RESPECT_LARGE_ALIGNMENT && !USE_REGIONS if (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, generation_allocation_pointer (gen), heap_segment_reserved (seg), old_loc, USE_PADDING_TAIL | pad_in_front) && (grow_heap_segment (seg, generation_allocation_pointer (gen), old_loc, size, pad_in_front REQD_ALIGN_AND_OFFSET_ARG))) { dprintf (3, ("Expanded segment allocation by committing more memory")); heap_segment_plan_allocated (seg) = heap_segment_committed (seg); generation_allocation_limit (gen) = heap_segment_plan_allocated (seg); } else { heap_segment* next_seg = heap_segment_next (seg); dprintf (REGIONS_LOG, ("aic next: %Ix(%Ix,%Ix) -> %Ix(%Ix,%Ix)", heap_segment_mem (seg), heap_segment_allocated (seg), heap_segment_plan_allocated (seg), (next_seg ? heap_segment_mem (next_seg) : 0), (next_seg ? heap_segment_allocated (next_seg) : 0), (next_seg ? heap_segment_plan_allocated (next_seg) : 0))); assert (generation_allocation_pointer (gen)>= heap_segment_mem (seg)); // Verify that all pinned plugs for this segment are consumed if (!pinned_plug_que_empty_p() && ((pinned_plug (oldest_pin()) < heap_segment_allocated (seg)) && (pinned_plug (oldest_pin()) >= generation_allocation_pointer (gen)))) { LOG((LF_GC, LL_INFO10, "remaining pinned plug %Ix while leaving segment on allocation", pinned_plug (oldest_pin()))); FATAL_GC_ERROR(); } assert (generation_allocation_pointer (gen)>= heap_segment_mem (seg)); assert (generation_allocation_pointer (gen)<= heap_segment_committed (seg)); heap_segment_plan_allocated (seg) = generation_allocation_pointer (gen); #ifdef USE_REGIONS set_region_plan_gen_num (seg, to_gen_number); if ((next_seg == 0) && (heap_segment_gen_num (seg) > 0)) { // We need to switch to a younger gen's segments so the allocate seg will be in // sync with the pins. 
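// Added note (interpretation, not in the original source): with USE_REGIONS, once planning
// runs off the last region owned by this generation, the remaining pins in the queue sit in
// regions of the next younger generation; moving the allocation cursor to that generation's
// start region (heap_segment_gen_num (seg) - 1 below) keeps the allocation segment advancing
// in the same order the pin queue is drained.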
next_seg = generation_start_segment (generation_of (heap_segment_gen_num (seg) - 1)); dprintf (REGIONS_LOG, ("h%d aic: switching to next gen%d start %Ix(%Ix)", heap_number, heap_segment_gen_num (next_seg), (size_t)next_seg, heap_segment_mem (next_seg))); } #endif //USE_REGIONS if (next_seg) { init_alloc_info (gen, next_seg); } else { #ifdef USE_REGIONS assert (!"should not happen for regions!"); #else return 0; //should only happen during allocation of generation 0 gap // in that case we are going to grow the heap anyway #endif //USE_REGIONS } } } } set_allocator_next_pin (gen); goto retry; } } { assert (generation_allocation_pointer (gen)>= heap_segment_mem (generation_allocation_segment (gen))); uint8_t* result = generation_allocation_pointer (gen); size_t pad = 0; #ifdef SHORT_PLUGS if ((pad_in_front & USE_PADDING_FRONT) && (((generation_allocation_pointer (gen) - generation_allocation_context_start_region (gen))==0) || ((generation_allocation_pointer (gen) - generation_allocation_context_start_region (gen))>=DESIRED_PLUG_LENGTH))) { ptrdiff_t dist = old_loc - result; if (dist == 0) { dprintf (3, ("old alloc: %Ix, same as new alloc, not padding", old_loc)); pad = 0; } else { if ((dist > 0) && (dist < (ptrdiff_t)Align (min_obj_size))) { dprintf (1, ("old alloc: %Ix, only %d bytes > new alloc! Shouldn't happen", old_loc, dist)); FATAL_GC_ERROR(); } pad = Align (min_obj_size); set_plug_padded (old_loc); } } #endif //SHORT_PLUGS #ifdef FEATURE_STRUCTALIGN _ASSERTE(!old_loc || alignmentOffset != 0); _ASSERTE(old_loc || requiredAlignment == DATA_ALIGNMENT); if ((old_loc != 0)) { size_t pad1 = ComputeStructAlignPad(result+pad, requiredAlignment, alignmentOffset); set_node_aligninfo (old_loc, requiredAlignment, pad1); pad += pad1; } #else // FEATURE_STRUCTALIGN if (!((old_loc == 0) || same_large_alignment_p (old_loc, result+pad))) { pad += switch_alignment_size (pad != 0); set_node_realigned(old_loc); dprintf (3, ("Allocation realignment old_loc: %Ix, new_loc:%Ix", (size_t)old_loc, (size_t)(result+pad))); assert (same_large_alignment_p (result + pad, old_loc)); } #endif // FEATURE_STRUCTALIGN #ifdef SHORT_PLUGS if ((next_pinned_plug != 0) && (pad != 0) && (generation_allocation_segment (gen) == current_seg)) { assert (old_loc != 0); ptrdiff_t dist_to_next_pin = (ptrdiff_t)(next_pinned_plug - (generation_allocation_pointer (gen) + size + pad)); assert (dist_to_next_pin >= 0); if ((dist_to_next_pin >= 0) && (dist_to_next_pin < (ptrdiff_t)Align (min_obj_size))) { dprintf (3, ("%Ix->(%Ix,%Ix),%Ix(%Ix)(%Ix),NP->PP", old_loc, generation_allocation_pointer (gen), generation_allocation_limit (gen), next_pinned_plug, size, dist_to_next_pin)); clear_plug_padded (old_loc); pad = 0; *convert_to_pinned_p = TRUE; record_interesting_data_point (idp_converted_pin); return 0; } } #endif //SHORT_PLUGS if ((old_loc == 0) || (pad != 0)) { //allocating a non plug or a gap, so reset the start region generation_allocation_context_start_region (gen) = generation_allocation_pointer (gen); } generation_allocation_pointer (gen) += size + pad; assert (generation_allocation_pointer (gen) <= generation_allocation_limit (gen)); if ((pad > 0) && (to_gen_number >= 0)) { generation_free_obj_space (generation_of (to_gen_number)) += pad; } #ifdef FREE_USAGE_STATS generation_allocated_since_last_pin (gen) += size; #endif //FREE_USAGE_STATS dprintf (3, ("aic: old: %Ix ptr: %Ix, limit: %Ix, sr: %Ix, res: %Ix, pad: %Id", old_loc, generation_allocation_pointer (gen), generation_allocation_limit (gen), 
generation_allocation_context_start_region (gen), result, (size_t)pad)); assert (result + pad); return result + pad; } } int gc_heap::joined_generation_to_condemn (BOOL should_evaluate_elevation, int initial_gen, int current_gen, BOOL* blocking_collection_p STRESS_HEAP_ARG(int n_original)) { gc_data_global.gen_to_condemn_reasons.init(); #ifdef BGC_SERVO_TUNING if (settings.entry_memory_load == 0) { uint32_t current_memory_load = 0; uint64_t current_available_physical = 0; get_memory_info (&current_memory_load, &current_available_physical); settings.entry_memory_load = current_memory_load; settings.entry_available_physical_mem = current_available_physical; } #endif //BGC_SERVO_TUNING int n = current_gen; #ifdef MULTIPLE_HEAPS BOOL joined_last_gc_before_oom = FALSE; for (int i = 0; i < n_heaps; i++) { if (g_heaps[i]->last_gc_before_oom) { dprintf (GTC_LOG, ("h%d is setting blocking to TRUE", i)); joined_last_gc_before_oom = TRUE; break; } } #else BOOL joined_last_gc_before_oom = last_gc_before_oom; #endif //MULTIPLE_HEAPS if (joined_last_gc_before_oom && settings.pause_mode != pause_low_latency) { assert (*blocking_collection_p); } if (should_evaluate_elevation && (n == max_generation)) { dprintf (GTC_LOG, ("lock: %d(%d)", (settings.should_lock_elevation ? 1 : 0), settings.elevation_locked_count)); if (settings.should_lock_elevation) { settings.elevation_locked_count++; if (settings.elevation_locked_count == 6) { settings.elevation_locked_count = 0; } else { n = max_generation - 1; gc_data_global.gen_to_condemn_reasons.set_condition(gen_joined_avoid_unproductive); settings.elevation_reduced = TRUE; } } else { settings.elevation_locked_count = 0; } } else { settings.should_lock_elevation = FALSE; settings.elevation_locked_count = 0; } if (provisional_mode_triggered && (n == max_generation)) { // There are a few cases where we should not reduce the generation. if ((initial_gen == max_generation) || (settings.reason == reason_alloc_loh)) { // If we are doing a full GC in the provisional mode, we always // make it blocking because we don't want to get into a situation // where foreground GCs are asking for a compacting full GC right away // and not getting it. dprintf (GTC_LOG, ("full GC induced, not reducing gen")); if (initial_gen == max_generation) { gc_data_global.gen_to_condemn_reasons.set_condition(gen_joined_pm_induced_fullgc_p); } else { gc_data_global.gen_to_condemn_reasons.set_condition(gen_joined_pm_alloc_loh); } *blocking_collection_p = TRUE; } else if (should_expand_in_full_gc || joined_last_gc_before_oom) { dprintf (GTC_LOG, ("need full blocking GCs to expand heap or avoid OOM, not reducing gen")); assert (*blocking_collection_p); } else { dprintf (GTC_LOG, ("reducing gen in PM: %d->%d->%d", initial_gen, n, (max_generation - 1))); gc_data_global.gen_to_condemn_reasons.set_condition(gen_joined_gen1_in_pm); n = max_generation - 1; } } if (should_expand_in_full_gc) { should_expand_in_full_gc = FALSE; } if (heap_hard_limit) { // If we have already consumed 90% of the limit, we should check to see if we should compact LOH. // TODO: should unify this with gen2. 
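// Worked example (added, illustrative only): the checks below are done in integer math to
// avoid floating point. (current_total_committed * 10) >= (heap_hard_limit * 9) is the
// "committed is at least 90% of the hard limit" test, and (loh_frag * 8) >= heap_hard_limit
// means LOH fragmentation is at least 1/8 (12.5%) of the limit. E.g. with a hypothetical
// heap_hard_limit of 800MB, LOH compaction starts being considered once committed reaches
// 720MB and either the LOH fragmentation or the estimated LOH reclaim is at least 100MB.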
dprintf (GTC_LOG, ("committed %Id is %d%% of limit %Id", current_total_committed, (int)((float)current_total_committed * 100.0 / (float)heap_hard_limit), heap_hard_limit)); bool full_compact_gc_p = false; if (joined_last_gc_before_oom) { gc_data_global.gen_to_condemn_reasons.set_condition(gen_joined_limit_before_oom); full_compact_gc_p = true; } else if ((current_total_committed * 10) >= (heap_hard_limit * 9)) { size_t loh_frag = get_total_gen_fragmentation (loh_generation); // If the LOH frag is >= 1/8 it's worth compacting it if ((loh_frag * 8) >= heap_hard_limit) { dprintf (GTC_LOG, ("loh frag: %Id > 1/8 of limit %Id", loh_frag, (heap_hard_limit / 8))); gc_data_global.gen_to_condemn_reasons.set_condition(gen_joined_limit_loh_frag); full_compact_gc_p = true; } else { // If there's not much fragmentation but it looks like it'll be productive to // collect LOH, do that. size_t est_loh_reclaim = get_total_gen_estimated_reclaim (loh_generation); if ((est_loh_reclaim * 8) >= heap_hard_limit) { gc_data_global.gen_to_condemn_reasons.set_condition(gen_joined_limit_loh_reclaim); full_compact_gc_p = true; } dprintf (GTC_LOG, ("loh est reclaim: %Id, 1/8 of limit %Id", est_loh_reclaim, (heap_hard_limit / 8))); } } if (full_compact_gc_p) { n = max_generation; *blocking_collection_p = TRUE; settings.loh_compaction = TRUE; dprintf (GTC_LOG, ("compacting LOH due to hard limit")); } } if ((conserve_mem_setting != 0) && (n == max_generation)) { float frag_limit = 1.0f - conserve_mem_setting / 10.0f; size_t loh_size = get_total_gen_size (loh_generation); size_t gen2_size = get_total_gen_size (max_generation); float loh_frag_ratio = 0.0f; float combined_frag_ratio = 0.0f; if (loh_size != 0) { size_t loh_frag = get_total_gen_fragmentation (loh_generation); size_t gen2_frag = get_total_gen_fragmentation (max_generation); loh_frag_ratio = (float)loh_frag / (float)loh_size; combined_frag_ratio = (float)(gen2_frag + loh_frag) / (float)(gen2_size + loh_size); } if (combined_frag_ratio > frag_limit) { dprintf (GTC_LOG, ("combined frag: %f > limit %f, loh frag: %f", combined_frag_ratio, frag_limit, loh_frag_ratio)); gc_data_global.gen_to_condemn_reasons.set_condition (gen_max_high_frag_p); n = max_generation; *blocking_collection_p = TRUE; if (loh_frag_ratio > frag_limit) { settings.loh_compaction = TRUE; dprintf (GTC_LOG, ("compacting LOH due to GCConserveMem setting")); } } } #ifdef BGC_SERVO_TUNING if (bgc_tuning::should_trigger_ngc2()) { gc_data_global.gen_to_condemn_reasons.set_condition(gen_joined_servo_ngc); n = max_generation; *blocking_collection_p = TRUE; } if ((n < max_generation) && !gc_heap::background_running_p() && bgc_tuning::stepping_trigger (settings.entry_memory_load, get_current_gc_index (max_generation))) { gc_data_global.gen_to_condemn_reasons.set_condition(gen_joined_servo_initial); n = max_generation; saved_bgc_tuning_reason = reason_bgc_stepping; } if ((n < max_generation) && bgc_tuning::should_trigger_bgc()) { gc_data_global.gen_to_condemn_reasons.set_condition(gen_joined_servo_bgc); n = max_generation; } if (n == (max_generation - 1)) { if (bgc_tuning::should_delay_alloc (max_generation)) { gc_data_global.gen_to_condemn_reasons.set_condition(gen_joined_servo_postpone); n -= 1; } } #endif //BGC_SERVO_TUNING if ((n == max_generation) && (*blocking_collection_p == FALSE)) { // If we are doing a gen2 we should reset elevation regardless and let the gen2 // decide if we should lock again or in the bgc case by design we will not retract // gen1 start. 
settings.should_lock_elevation = FALSE; settings.elevation_locked_count = 0; dprintf (GTC_LOG, ("doing bgc, reset elevation")); } #ifdef STRESS_HEAP #ifdef BACKGROUND_GC // We can only do Concurrent GC Stress if the caller did not explicitly ask for all // generations to be collected, // // [LOCALGC TODO] STRESS_HEAP is not defined for a standalone GC so there are multiple // things that need to be fixed in this code block. if (n_original != max_generation && g_pConfig->GetGCStressLevel() && gc_can_use_concurrent) { #ifndef FEATURE_REDHAWK if (*blocking_collection_p) { // We call StressHeap() a lot for Concurrent GC Stress. However, // if we can not do a concurrent collection, no need to stress anymore. // @TODO: Enable stress when the memory pressure goes down again GCStressPolicy::GlobalDisable(); } else #endif // !FEATURE_REDHAWK { gc_data_global.gen_to_condemn_reasons.set_condition(gen_joined_stress); n = max_generation; } } #endif //BACKGROUND_GC #endif //STRESS_HEAP #ifdef BACKGROUND_GC if ((n == max_generation) && background_running_p()) { n = max_generation - 1; dprintf (GTC_LOG, ("bgc in progress - 1 instead of 2")); } #endif //BACKGROUND_GC return n; } inline size_t get_survived_size (gc_history_per_heap* hist) { size_t surv_size = 0; gc_generation_data* gen_data; for (int gen_number = 0; gen_number < total_generation_count; gen_number++) { gen_data = &(hist->gen_data[gen_number]); surv_size += (gen_data->size_after - gen_data->free_list_space_after - gen_data->free_obj_space_after); } return surv_size; } size_t gc_heap::get_total_survived_size() { size_t total_surv_size = 0; #ifdef MULTIPLE_HEAPS for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; gc_history_per_heap* current_gc_data_per_heap = hp->get_gc_data_per_heap(); total_surv_size += get_survived_size (current_gc_data_per_heap); } #else gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap(); total_surv_size = get_survived_size (current_gc_data_per_heap); #endif //MULTIPLE_HEAPS return total_surv_size; } size_t gc_heap::get_total_allocated_since_last_gc() { size_t total_allocated_size = 0; #ifdef MULTIPLE_HEAPS for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; #else //MULTIPLE_HEAPS { gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS total_allocated_size += hp->allocated_since_last_gc[0] + hp->allocated_since_last_gc[1]; hp->allocated_since_last_gc[0] = 0; hp->allocated_since_last_gc[1] = 0; } return total_allocated_size; } // Gets what's allocated on both SOH, LOH, etc that hasn't been collected. size_t gc_heap::get_current_allocated() { dynamic_data* dd = dynamic_data_of (0); size_t current_alloc = dd_desired_allocation (dd) - dd_new_allocation (dd); for (int i = uoh_start_generation; i < total_generation_count; i++) { dynamic_data* dd = dynamic_data_of (i); current_alloc += dd_desired_allocation (dd) - dd_new_allocation (dd); } return current_alloc; } size_t gc_heap::get_total_allocated() { size_t total_current_allocated = 0; #ifdef MULTIPLE_HEAPS for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; total_current_allocated += hp->get_current_allocated(); } #else total_current_allocated = get_current_allocated(); #endif //MULTIPLE_HEAPS return total_current_allocated; } size_t gc_heap::get_total_promoted() { size_t total_promoted_size = 0; int highest_gen = ((settings.condemned_generation == max_generation) ? 
(total_generation_count - 1) : settings.condemned_generation); #ifdef MULTIPLE_HEAPS for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; #else //MULTIPLE_HEAPS { gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS for (int gen_number = 0; gen_number <= highest_gen; gen_number++) { total_promoted_size += dd_promoted_size (hp->dynamic_data_of (gen_number)); } } return total_promoted_size; } #ifdef BGC_SERVO_TUNING size_t gc_heap::get_total_generation_size (int gen_number) { size_t total_generation_size = 0; #ifdef MULTIPLE_HEAPS for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; #else //MULTIPLE_HEAPS { gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS total_generation_size += hp->generation_size (gen_number); } return total_generation_size; } // gets all that's allocated into the gen. This is only used for gen2/3 // for servo tuning. size_t gc_heap::get_total_servo_alloc (int gen_number) { size_t total_alloc = 0; #ifdef MULTIPLE_HEAPS for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; #else //MULTIPLE_HEAPS { gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS generation* gen = hp->generation_of (gen_number); total_alloc += generation_free_list_allocated (gen); total_alloc += generation_end_seg_allocated (gen); total_alloc += generation_condemned_allocated (gen); total_alloc += generation_sweep_allocated (gen); } return total_alloc; } size_t gc_heap::get_total_bgc_promoted() { size_t total_bgc_promoted = 0; #ifdef MULTIPLE_HEAPS int num_heaps = gc_heap::n_heaps; #else //MULTIPLE_HEAPS int num_heaps = 1; #endif //MULTIPLE_HEAPS for (int i = 0; i < num_heaps; i++) { total_bgc_promoted += bpromoted_bytes (i); } return total_bgc_promoted; } // This is called after compute_new_dynamic_data is called, at which point // dd_current_size is calculated. 
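// Added note (sketch of the idiom, not in the original source): the get_total_* helpers in
// this area all share the same preprocessor pattern. Under MULTIPLE_HEAPS the opening
// "for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i];" supplies
// the block header, while the single-heap #else branch supplies just "{ gc_heap* hp =
// pGenGCHeap;". The shared body after #endif then compiles either as the loop body or as a
// one-shot block, so the per-heap accumulation code is written only once.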
size_t gc_heap::get_total_surv_size (int gen_number) { size_t total_surv_size = 0; #ifdef MULTIPLE_HEAPS for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; #else //MULTIPLE_HEAPS { gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS total_surv_size += dd_current_size (hp->dynamic_data_of (gen_number)); } return total_surv_size; } size_t gc_heap::get_total_begin_data_size (int gen_number) { size_t total_begin_data_size = 0; #ifdef MULTIPLE_HEAPS for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; #else //MULTIPLE_HEAPS { gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS total_begin_data_size += dd_begin_data_size (hp->dynamic_data_of (gen_number)); } return total_begin_data_size; } size_t gc_heap::get_total_generation_fl_size (int gen_number) { size_t total_generation_fl_size = 0; #ifdef MULTIPLE_HEAPS for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; #else //MULTIPLE_HEAPS { gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS total_generation_fl_size += generation_free_list_space (hp->generation_of (gen_number)); } return total_generation_fl_size; } size_t gc_heap::get_current_gc_index (int gen_number) { #ifdef MULTIPLE_HEAPS gc_heap* hp = gc_heap::g_heaps[0]; return dd_collection_count (hp->dynamic_data_of (gen_number)); #else return dd_collection_count (dynamic_data_of (gen_number)); #endif //MULTIPLE_HEAPS } #endif //BGC_SERVO_TUNING size_t gc_heap::current_generation_size (int gen_number) { dynamic_data* dd = dynamic_data_of (gen_number); size_t gen_size = (dd_current_size (dd) + dd_desired_allocation (dd) - dd_new_allocation (dd)); return gen_size; } #ifdef USE_REGIONS // We may need a new empty region while doing a GC so try to get one now, if we don't have any // reserve in the free region list. bool gc_heap::try_get_new_free_region() { heap_segment* region = 0; if (free_regions[basic_free_region].get_num_free_regions() > 0) { dprintf (REGIONS_LOG, ("h%d has %d free regions %Ix", heap_number, free_regions[basic_free_region].get_num_free_regions(), heap_segment_mem (free_regions[basic_free_region].get_first_free_region()))); return true; } else { region = allocate_new_region (__this, 0, false); if (region) { if (init_table_for_region (0, region)) { return_free_region (region); dprintf (REGIONS_LOG, ("h%d got a new empty region %Ix", heap_number, region)); } else { region = 0; } } } return (region != 0); } bool gc_heap::init_table_for_region (int gen_number, heap_segment* region) { #ifdef BACKGROUND_GC if (is_bgc_in_progress()) { dprintf (GC_TABLE_LOG, ("new seg %Ix, mark_array is %Ix", heap_segment_mem (region), mark_array)); if (!commit_mark_array_new_seg (__this, region)) { dprintf (GC_TABLE_LOG, ("failed to commit mark array for the new region %Ix-%Ix", get_region_start (region), heap_segment_reserved (region))); // We don't have memory to commit the mark array so we cannot use the new region. global_region_allocator.delete_region (get_region_start (region)); return false; } } #endif //BACKGROUND_GC if (gen_number <= max_generation) { size_t first_brick = brick_of (heap_segment_mem (region)); set_brick (first_brick, -1); } else { assert (brick_table[brick_of (heap_segment_mem (region))] == 0); } return true; } #endif //USE_REGIONS #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable:6326) // "Potential comparison of a constant with another constant" is intentional in this function. 
#endif //_PREFAST_ /* This is called by when we are actually doing a GC, or when we are just checking whether we would do a full blocking GC, in which case check_only_p is TRUE. The difference between calling this with check_only_p TRUE and FALSE is that when it's TRUE: settings.reason is ignored budgets are not checked (since they are checked before this is called) it doesn't change anything non local like generation_skip_ratio */ int gc_heap::generation_to_condemn (int n_initial, BOOL* blocking_collection_p, BOOL* elevation_requested_p, BOOL check_only_p) { gc_mechanisms temp_settings = settings; gen_to_condemn_tuning temp_condemn_reasons; gc_mechanisms* local_settings = (check_only_p ? &temp_settings : &settings); gen_to_condemn_tuning* local_condemn_reasons = (check_only_p ? &temp_condemn_reasons : &gen_to_condemn_reasons); if (!check_only_p) { if ((local_settings->reason == reason_oos_soh) || (local_settings->reason == reason_oos_loh)) { assert (n_initial >= 1); } assert (settings.reason != reason_empty); } local_condemn_reasons->init(); int n = n_initial; int n_alloc = n; if (heap_number == 0) { dprintf (GTC_LOG, ("init: %d(%d)", n_initial, settings.reason)); } int i = 0; int temp_gen = 0; BOOL low_memory_detected = g_low_memory_status; uint32_t memory_load = 0; uint64_t available_physical = 0; uint64_t available_page_file = 0; BOOL check_memory = FALSE; BOOL high_fragmentation = FALSE; BOOL v_high_memory_load = FALSE; BOOL high_memory_load = FALSE; BOOL low_ephemeral_space = FALSE; BOOL evaluate_elevation = TRUE; *elevation_requested_p = FALSE; *blocking_collection_p = FALSE; BOOL check_max_gen_alloc = TRUE; #ifdef STRESS_HEAP int orig_gen = n; #endif //STRESS_HEAP if (!check_only_p) { dd_fragmentation (dynamic_data_of (0)) = generation_free_list_space (youngest_generation) + generation_free_obj_space (youngest_generation); for (int i = uoh_start_generation; i < total_generation_count; i++) { dd_fragmentation (dynamic_data_of (i)) = generation_free_list_space (generation_of (i)) + generation_free_obj_space (generation_of (i)); } //save new_allocation for (i = 0; i < total_generation_count; i++) { dynamic_data* dd = dynamic_data_of (i); dprintf (GTC_LOG, ("h%d: g%d: l: %Id (%Id)", heap_number, i, dd_new_allocation (dd), dd_desired_allocation (dd))); dd_gc_new_allocation (dd) = dd_new_allocation (dd); } local_condemn_reasons->set_gen (gen_initial, n); temp_gen = n; #ifdef BACKGROUND_GC if (gc_heap::background_running_p() #ifdef BGC_SERVO_TUNING || bgc_tuning::fl_tuning_triggered || (bgc_tuning::enable_fl_tuning && bgc_tuning::use_stepping_trigger_p) #endif //BGC_SERVO_TUNING ) { check_max_gen_alloc = FALSE; } #endif //BACKGROUND_GC if (check_max_gen_alloc) { //figure out if UOH objects need to be collected. for (int i = uoh_start_generation; i < total_generation_count; i++) { if (get_new_allocation (i) <= 0) { n = max_generation; local_condemn_reasons->set_gen (gen_alloc_budget, n); dprintf (BGC_TUNING_LOG, ("BTL[GTC]: trigger based on gen%d b: %Id", (i), get_new_allocation (i))); break; } } } //figure out which generation ran out of allocation for (i = n+1; i <= (check_max_gen_alloc ? 
max_generation : (max_generation - 1)); i++) { if (get_new_allocation (i) <= 0) { n = i; if (n == max_generation) { dprintf (BGC_TUNING_LOG, ("BTL[GTC]: trigger based on gen2 b: %Id", get_new_allocation (max_generation))); } } else break; } } if (n > temp_gen) { local_condemn_reasons->set_gen (gen_alloc_budget, n); } dprintf (GTC_LOG, ("h%d: g%d budget", heap_number, ((get_new_allocation (loh_generation) <= 0) ? 3 : n))); n_alloc = n; #if defined(BACKGROUND_GC) && !defined(MULTIPLE_HEAPS) //time based tuning // if enough time has elapsed since the last gc // and the number of gc is too low (1/10 of lower gen) then collect // This should also be enabled if we have memory concerns int n_time_max = max_generation; if (!check_only_p) { if (!check_max_gen_alloc) { n_time_max = max_generation - 1; } } if ((local_settings->pause_mode == pause_interactive) || (local_settings->pause_mode == pause_sustained_low_latency)) { dynamic_data* dd0 = dynamic_data_of (0); uint64_t now = GetHighPrecisionTimeStamp(); temp_gen = n; for (i = (temp_gen+1); i <= n_time_max; i++) { dynamic_data* dd = dynamic_data_of (i); if ((now > dd_time_clock(dd) + dd_time_clock_interval(dd)) && (dd_gc_clock (dd0) > (dd_gc_clock (dd) + dd_gc_clock_interval(dd))) && ((n < max_generation) || ((dd_current_size (dd) < dd_max_size (dd0))))) { n = min (i, n_time_max); dprintf (GTC_LOG, ("time %d", n)); } } if (n > temp_gen) { local_condemn_reasons->set_gen (gen_time_tuning, n); if (n == max_generation) { dprintf (BGC_TUNING_LOG, ("BTL[GTC]: trigger based on time")); } } } if (n != n_alloc) { dprintf (GTC_LOG, ("Condemning %d based on time tuning and fragmentation", n)); } #endif //BACKGROUND_GC && !MULTIPLE_HEAPS if (n < (max_generation - 1)) { if (dt_low_card_table_efficiency_p (tuning_deciding_condemned_gen)) { n = max (n, max_generation - 1); local_settings->promotion = TRUE; dprintf (GTC_LOG, ("h%d: skip %d, c %d", heap_number, generation_skip_ratio, n)); local_condemn_reasons->set_condition (gen_low_card_p); } } if (!check_only_p) { generation_skip_ratio = 100; } if (dt_low_ephemeral_space_p (check_only_p ? tuning_deciding_full_gc : tuning_deciding_condemned_gen)) { low_ephemeral_space = TRUE; n = max (n, max_generation - 1); local_condemn_reasons->set_condition (gen_low_ephemeral_p); dprintf (GTC_LOG, ("h%d: low eph", heap_number)); if (!provisional_mode_triggered) { #ifdef BACKGROUND_GC if (!gc_can_use_concurrent || (generation_free_list_space (generation_of (max_generation)) == 0)) #endif //BACKGROUND_GC { //It is better to defragment first if we are running out of space for //the ephemeral generation but we have enough fragmentation to make up for it //in the non ephemeral generation. Essentially we are trading a gen2 for // having to expand heap in ephemeral collections. 
if (dt_high_frag_p (tuning_deciding_condemned_gen, max_generation - 1, TRUE)) { high_fragmentation = TRUE; local_condemn_reasons->set_condition (gen_max_high_frag_e_p); dprintf (GTC_LOG, ("heap%d: gen1 frag", heap_number)); } } } } #ifdef USE_REGIONS if (!try_get_new_free_region()) { dprintf (GTC_LOG, ("can't get an empty region -> full compacting")); last_gc_before_oom = TRUE; } #endif //USE_REGIONS //figure out which ephemeral generation is too fragmented temp_gen = n; for (i = n+1; i < max_generation; i++) { if (dt_high_frag_p (tuning_deciding_condemned_gen, i)) { dprintf (GTC_LOG, ("h%d g%d too frag", heap_number, i)); n = i; } else break; } if (low_ephemeral_space) { //enable promotion local_settings->promotion = TRUE; } if (n > temp_gen) { local_condemn_reasons->set_condition (gen_eph_high_frag_p); } if (!check_only_p) { if (settings.pause_mode == pause_low_latency) { if (!is_induced (settings.reason)) { n = min (n, max_generation - 1); dprintf (GTC_LOG, ("low latency mode is enabled, condemning %d", n)); evaluate_elevation = FALSE; goto exit; } } } // It's hard to catch when we get to the point that the memory load is so high // we get an induced GC from the finalizer thread so we are checking the memory load // for every gen0 GC. check_memory = (check_only_p ? (n >= 0) : ((n >= 1) || low_memory_detected)); if (check_memory) { //find out if we are short on memory get_memory_info (&memory_load, &available_physical, &available_page_file); if (heap_number == 0) { dprintf (GTC_LOG, ("ml: %d", memory_load)); } #ifdef USE_REGIONS // For regions we want to take the VA range into consideration as well. uint32_t va_memory_load = global_region_allocator.get_va_memory_load(); if (heap_number == 0) { dprintf (GTC_LOG, ("h%d ML %d, va ML %d", heap_number, memory_load, va_memory_load)); } memory_load = max (memory_load, va_memory_load); #endif //USE_REGIONS // Need to get it early enough for all heaps to use. local_settings->entry_available_physical_mem = available_physical; local_settings->entry_memory_load = memory_load; // @TODO: Force compaction more often under GCSTRESS if (memory_load >= high_memory_load_th || low_memory_detected) { #ifdef SIMPLE_DPRINTF // stress log can't handle any parameter that's bigger than a void*. if (heap_number == 0) { dprintf (GTC_LOG, ("tp: %I64d, ap: %I64d", total_physical_mem, available_physical)); } #endif //SIMPLE_DPRINTF high_memory_load = TRUE; if (memory_load >= v_high_memory_load_th || low_memory_detected) { // TODO: Perhaps in 64-bit we should be estimating gen1's fragmentation as well since // gen1/gen0 may take a lot more memory than gen2. 
if (!high_fragmentation) { high_fragmentation = dt_estimate_reclaim_space_p (tuning_deciding_condemned_gen, max_generation); } v_high_memory_load = TRUE; } else { if (!high_fragmentation) { high_fragmentation = dt_estimate_high_frag_p (tuning_deciding_condemned_gen, max_generation, available_physical); } } if (high_fragmentation) { if (high_memory_load) { local_condemn_reasons->set_condition (gen_max_high_frag_m_p); } else if (v_high_memory_load) { local_condemn_reasons->set_condition (gen_max_high_frag_vm_p); } } } } dprintf (GTC_LOG, ("h%d: le: %d, hm: %d, vm: %d, f: %d", heap_number, low_ephemeral_space, high_memory_load, v_high_memory_load, high_fragmentation)); if (should_expand_in_full_gc) { dprintf (GTC_LOG, ("h%d: expand_in_full - BLOCK", heap_number)); *blocking_collection_p = TRUE; evaluate_elevation = FALSE; n = max_generation; local_condemn_reasons->set_condition (gen_expand_fullgc_p); } if (last_gc_before_oom) { dprintf (GTC_LOG, ("h%d: alloc full - BLOCK", heap_number)); n = max_generation; *blocking_collection_p = TRUE; if ((local_settings->reason == reason_oos_loh) || (local_settings->reason == reason_alloc_loh)) { evaluate_elevation = FALSE; } local_condemn_reasons->set_condition (gen_before_oom); } if (!check_only_p) { if (is_induced_blocking (settings.reason) && n_initial == max_generation IN_STRESS_HEAP( && !settings.stress_induced )) { if (heap_number == 0) { dprintf (GTC_LOG, ("induced - BLOCK")); } *blocking_collection_p = TRUE; local_condemn_reasons->set_condition (gen_induced_fullgc_p); evaluate_elevation = FALSE; } if (settings.reason == reason_induced_noforce) { local_condemn_reasons->set_condition (gen_induced_noforce_p); evaluate_elevation = FALSE; } } if (!provisional_mode_triggered && evaluate_elevation && (low_ephemeral_space || high_memory_load || v_high_memory_load)) { *elevation_requested_p = TRUE; #ifdef HOST_64BIT // if we are in high memory load and have consumed 10% of the gen2 budget, do a gen2 now. if (high_memory_load || v_high_memory_load) { dynamic_data* dd_max = dynamic_data_of (max_generation); if (((float)dd_new_allocation (dd_max) / (float)dd_desired_allocation (dd_max)) < 0.9) { dprintf (GTC_LOG, ("%Id left in gen2 alloc (%Id)", dd_new_allocation (dd_max), dd_desired_allocation (dd_max))); n = max_generation; local_condemn_reasons->set_condition (gen_almost_max_alloc); } } if (n <= max_generation) { #endif // HOST_64BIT if (high_fragmentation) { //elevate to max_generation n = max_generation; dprintf (GTC_LOG, ("h%d: f full", heap_number)); #ifdef BACKGROUND_GC if (high_memory_load || v_high_memory_load) { // For background GC we want to do blocking collections more eagerly because we don't // want to get into the situation where the memory load becomes high while we are in // a background GC and we'd have to wait for the background GC to finish to start // a blocking collection (right now the implemenation doesn't handle converting // a background GC to a blocking collection midway. 
dprintf (GTC_LOG, ("h%d: bgc - BLOCK", heap_number)); *blocking_collection_p = TRUE; } #else if (v_high_memory_load) { dprintf (GTC_LOG, ("h%d: - BLOCK", heap_number)); *blocking_collection_p = TRUE; } #endif //BACKGROUND_GC } else { n = max (n, max_generation - 1); dprintf (GTC_LOG, ("h%d: nf c %d", heap_number, n)); } #ifdef HOST_64BIT } #endif // HOST_64BIT } if (!provisional_mode_triggered && (n == (max_generation - 1)) && (n_alloc < (max_generation -1))) { #ifdef BGC_SERVO_TUNING if (!bgc_tuning::enable_fl_tuning) #endif //BGC_SERVO_TUNING { dprintf (GTC_LOG, ("h%d: budget %d, check 2", heap_number, n_alloc)); if (get_new_allocation (max_generation) <= 0) { dprintf (GTC_LOG, ("h%d: budget alloc", heap_number)); n = max_generation; local_condemn_reasons->set_condition (gen_max_gen1); } } } //figure out if max_generation is too fragmented -> blocking collection if (!provisional_mode_triggered #ifdef BGC_SERVO_TUNING && !bgc_tuning::enable_fl_tuning #endif //BGC_SERVO_TUNING && (n == max_generation)) { if (dt_high_frag_p (tuning_deciding_condemned_gen, n)) { dprintf (GTC_LOG, ("h%d: g%d too frag", heap_number, n)); local_condemn_reasons->set_condition (gen_max_high_frag_p); if (local_settings->pause_mode != pause_sustained_low_latency) { *blocking_collection_p = TRUE; } } } #ifdef BACKGROUND_GC if ((n == max_generation) && !(*blocking_collection_p)) { if (heap_number == 0) { BOOL bgc_heap_too_small = TRUE; size_t gen2size = 0; size_t gen3size = 0; #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { if (((g_heaps[i]->current_generation_size (max_generation)) > bgc_min_per_heap) || ((g_heaps[i]->current_generation_size (loh_generation)) > bgc_min_per_heap) || ((g_heaps[i]->current_generation_size (poh_generation)) > bgc_min_per_heap)) { bgc_heap_too_small = FALSE; break; } } #else //MULTIPLE_HEAPS if ((current_generation_size (max_generation) > bgc_min_per_heap) || (current_generation_size (loh_generation) > bgc_min_per_heap) || (current_generation_size (poh_generation) > bgc_min_per_heap)) { bgc_heap_too_small = FALSE; } #endif //MULTIPLE_HEAPS if (bgc_heap_too_small) { dprintf (GTC_LOG, ("gen2 and gen3 too small")); #ifdef STRESS_HEAP // do not turn stress-induced collections into blocking GCs if (!settings.stress_induced) #endif //STRESS_HEAP { *blocking_collection_p = TRUE; } local_condemn_reasons->set_condition (gen_gen2_too_small); } } } #endif //BACKGROUND_GC exit: if (!check_only_p) { #ifdef STRESS_HEAP #ifdef BACKGROUND_GC // We can only do Concurrent GC Stress if the caller did not explicitly ask for all // generations to be collected, if (orig_gen != max_generation && g_pConfig->GetGCStressLevel() && gc_can_use_concurrent) { *elevation_requested_p = FALSE; } #endif //BACKGROUND_GC #endif //STRESS_HEAP if (check_memory) { fgm_result.available_pagefile_mb = (size_t)(available_page_file / (1024 * 1024)); } local_condemn_reasons->set_gen (gen_final_per_heap, n); get_gc_data_per_heap()->gen_to_condemn_reasons.init (local_condemn_reasons); #ifdef DT_LOG local_condemn_reasons->print (heap_number); #endif //DT_LOG if ((local_settings->reason == reason_oos_soh) || (local_settings->reason == reason_oos_loh)) { assert (n >= 1); } } return n; } #ifdef _PREFAST_ #pragma warning(pop) #endif //_PREFAST_ inline size_t gc_heap::min_reclaim_fragmentation_threshold (uint32_t num_heaps) { // if the memory load is higher, the threshold we'd want to collect gets lower. 
size_t min_mem_based_on_available = (500 - (settings.entry_memory_load - high_memory_load_th) * 40) * 1024 * 1024 / num_heaps; size_t ten_percent_size = (size_t)((float)generation_size (max_generation) * 0.10); uint64_t three_percent_mem = mem_one_percent * 3 / num_heaps; #ifdef SIMPLE_DPRINTF dprintf (GTC_LOG, ("min av: %Id, 10%% gen2: %Id, 3%% mem: %I64d", min_mem_based_on_available, ten_percent_size, three_percent_mem)); #endif //SIMPLE_DPRINTF return (size_t)(min (min_mem_based_on_available, min (ten_percent_size, three_percent_mem))); } inline uint64_t gc_heap::min_high_fragmentation_threshold(uint64_t available_mem, uint32_t num_heaps) { return min (available_mem, (256*1024*1024)) / num_heaps; } enum { CORINFO_EXCEPTION_GC = 0xE0004743 // 'GC' }; #ifdef BACKGROUND_GC void gc_heap::init_background_gc () { //reset the allocation so foreground gc can allocate into older (max_generation) generation generation* gen = generation_of (max_generation); generation_allocation_pointer (gen)= 0; generation_allocation_limit (gen) = 0; generation_allocation_segment (gen) = heap_segment_rw (generation_start_segment (gen)); PREFIX_ASSUME(generation_allocation_segment(gen) != NULL); #ifdef DOUBLY_LINKED_FL generation_set_bgc_mark_bit_p (gen) = FALSE; #endif //DOUBLY_LINKED_FL #ifndef USE_REGIONS //reset the plan allocation for each segment for (heap_segment* seg = generation_allocation_segment (gen); seg != ephemeral_heap_segment; seg = heap_segment_next_rw (seg)) { heap_segment_plan_allocated (seg) = heap_segment_allocated (seg); } #endif //!USE_REGIONS if (heap_number == 0) { dprintf (2, ("heap%d: bgc lowest: %Ix, highest: %Ix", heap_number, background_saved_lowest_address, background_saved_highest_address)); } } #endif //BACKGROUND_GC inline void fire_drain_mark_list_event (size_t mark_list_objects) { FIRE_EVENT(BGCDrainMark, mark_list_objects); } inline void fire_revisit_event (size_t dirtied_pages, size_t marked_objects, BOOL large_objects_p) { FIRE_EVENT(BGCRevisit, dirtied_pages, marked_objects, large_objects_p); } inline void fire_overflow_event (uint8_t* overflow_min, uint8_t* overflow_max, size_t marked_objects, int gen_number) { FIRE_EVENT(BGCOverflow_V1, (uint64_t)overflow_min, (uint64_t)overflow_max, marked_objects, gen_number == loh_generation, gen_number); } void gc_heap::concurrent_print_time_delta (const char* msg) { #ifdef TRACE_GC uint64_t current_time = GetHighPrecisionTimeStamp(); size_t elapsed_time_ms = (size_t)((current_time - time_bgc_last) / 1000); time_bgc_last = current_time; dprintf (2, ("h%d: %s T %Id ms", heap_number, msg, elapsed_time_ms)); #else UNREFERENCED_PARAMETER(msg); #endif //TRACE_GC } void gc_heap::free_list_info (int gen_num, const char* msg) { #if defined (BACKGROUND_GC) && defined (TRACE_GC) dprintf (3, ("h%d: %s", heap_number, msg)); for (int i = 0; i < total_generation_count; i++) { generation* gen = generation_of (i); if ((generation_allocation_size (gen) == 0) && (generation_free_list_space (gen) == 0) && (generation_free_obj_space (gen) == 0)) { // don't print if everything is 0. 
} else { dprintf (3, ("h%d: g%d: a-%Id, fl-%Id, fo-%Id", heap_number, i, generation_allocation_size (gen), generation_free_list_space (gen), generation_free_obj_space (gen))); } } #else UNREFERENCED_PARAMETER(gen_num); UNREFERENCED_PARAMETER(msg); #endif // BACKGROUND_GC && TRACE_GC } void gc_heap::update_collection_counts_for_no_gc() { assert (settings.pause_mode == pause_no_gc); settings.condemned_generation = max_generation; #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) g_heaps[i]->update_collection_counts(); #else //MULTIPLE_HEAPS update_collection_counts(); #endif //MULTIPLE_HEAPS full_gc_counts[gc_type_blocking]++; } BOOL gc_heap::should_proceed_with_gc() { if (gc_heap::settings.pause_mode == pause_no_gc) { if (current_no_gc_region_info.started) { // The no_gc mode was already in progress yet we triggered another GC, // this effectively exits the no_gc mode. restore_data_for_no_gc(); } else return should_proceed_for_no_gc(); } return TRUE; } void gc_heap::update_end_gc_time_per_heap() { for (int gen_number = 0; gen_number <= settings.condemned_generation; gen_number++) { dynamic_data* dd = dynamic_data_of (gen_number); dd_gc_elapsed_time (dd) = (size_t)(end_gc_time - dd_time_clock (dd)); } } void gc_heap::update_end_ngc_time() { end_gc_time = GetHighPrecisionTimeStamp(); #ifdef HEAP_BALANCE_INSTRUMENTATION last_gc_end_time_us = end_gc_time; dprintf (HEAP_BALANCE_LOG, ("[GC#%Id-%Id-%Id]", settings.gc_index, (last_gc_end_time_us - dd_time_clock (dynamic_data_of (0))), dd_time_clock (dynamic_data_of (0)))); #endif //HEAP_BALANCE_INSTRUMENTATION } size_t gc_heap::exponential_smoothing (int gen, size_t collection_count, size_t desired_per_heap) { // to avoid spikes in mem usage due to short terms fluctuations in survivorship, // apply some smoothing. 
size_t smoothing = min(3, collection_count); size_t new_smoothed_desired_per_heap = desired_per_heap / smoothing + ((smoothed_desired_per_heap[gen] / smoothing) * (smoothing - 1)); dprintf (2, ("new smoothed_desired_per_heap for gen %d = %Id, desired_per_heap = %Id", gen, new_smoothed_desired_per_heap, desired_per_heap)); smoothed_desired_per_heap[gen] = new_smoothed_desired_per_heap; return Align (smoothed_desired_per_heap[gen], get_alignment_constant (gen <= soh_gen2)); } //internal part of gc used by the serial and concurrent version void gc_heap::gc1() { #ifdef BACKGROUND_GC assert (settings.concurrent == (uint32_t)(bgc_thread_id.IsCurrentThread())); #endif //BACKGROUND_GC verify_soh_segment_list(); int n = settings.condemned_generation; if (settings.reason == reason_pm_full_gc) { assert (n == max_generation); init_records(); gen_to_condemn_tuning* local_condemn_reasons = &(get_gc_data_per_heap()->gen_to_condemn_reasons); local_condemn_reasons->init(); local_condemn_reasons->set_gen (gen_initial, n); local_condemn_reasons->set_gen (gen_final_per_heap, n); } update_collection_counts (); #ifdef BACKGROUND_GC bgc_alloc_lock->check(); #endif //BACKGROUND_GC free_list_info (max_generation, "beginning"); vm_heap->GcCondemnedGeneration = settings.condemned_generation; assert (g_gc_card_table == card_table); #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES assert (g_gc_card_bundle_table == card_bundle_table); #endif { #ifndef USE_REGIONS if (n == max_generation) { gc_low = lowest_address; gc_high = highest_address; } else { gc_low = generation_allocation_start (generation_of (n)); gc_high = heap_segment_reserved (ephemeral_heap_segment); } #endif //USE_REGIONS #ifdef BACKGROUND_GC if (settings.concurrent) { #ifdef TRACE_GC time_bgc_last = GetHighPrecisionTimeStamp(); #endif //TRACE_GC FIRE_EVENT(BGCBegin); concurrent_print_time_delta ("BGC"); concurrent_print_time_delta ("RW"); background_mark_phase(); free_list_info (max_generation, "after mark phase"); background_sweep(); free_list_info (max_generation, "after sweep phase"); } else #endif //BACKGROUND_GC { mark_phase (n, FALSE); check_gen0_bricks(); GCScan::GcRuntimeStructuresValid (FALSE); plan_phase (n); GCScan::GcRuntimeStructuresValid (TRUE); check_gen0_bricks(); } } //adjust the allocation size from the pinned quantities. 
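// Added note (illustrative, not in the original source): the loop below folds the pinned
// bytes tracked during this GC into the per-generation totals - the *_compact_size bucket
// when this was a compacting GC, the *_sweep_size bucket otherwise - and then zeroes both
// buckets so they start fresh for the next collection.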
for (int gen_number = 0; gen_number <= min (max_generation,n+1); gen_number++) { generation* gn = generation_of (gen_number); if (settings.compaction) { generation_pinned_allocated (gn) += generation_pinned_allocation_compact_size (gn); generation_allocation_size (generation_of (gen_number)) += generation_pinned_allocation_compact_size (gn); } else { generation_pinned_allocated (gn) += generation_pinned_allocation_sweep_size (gn); generation_allocation_size (generation_of (gen_number)) += generation_pinned_allocation_sweep_size (gn); } generation_pinned_allocation_sweep_size (gn) = 0; generation_pinned_allocation_compact_size (gn) = 0; } #ifdef BACKGROUND_GC if (settings.concurrent) { dynamic_data* dd = dynamic_data_of (n); end_gc_time = GetHighPrecisionTimeStamp(); dd_gc_elapsed_time (dd) = (size_t)(end_gc_time - dd_time_clock (dd)); #ifdef HEAP_BALANCE_INSTRUMENTATION if (heap_number == 0) { last_gc_end_time_us = end_gc_time; dprintf (HEAP_BALANCE_LOG, ("[GC#%Id-%Id-BGC]", settings.gc_index, dd_gc_elapsed_time (dd))); } #endif //HEAP_BALANCE_INSTRUMENTATION free_list_info (max_generation, "after computing new dynamic data"); gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap(); for (int gen_number = 0; gen_number < max_generation; gen_number++) { dprintf (2, ("end of BGC: gen%d new_alloc: %Id", gen_number, dd_desired_allocation (dynamic_data_of (gen_number)))); current_gc_data_per_heap->gen_data[gen_number].size_after = generation_size (gen_number); current_gc_data_per_heap->gen_data[gen_number].free_list_space_after = generation_free_list_space (generation_of (gen_number)); current_gc_data_per_heap->gen_data[gen_number].free_obj_space_after = generation_free_obj_space (generation_of (gen_number)); } } else #endif //BACKGROUND_GC { free_list_info (max_generation, "end"); for (int gen_number = 0; gen_number <= n; gen_number++) { compute_new_dynamic_data (gen_number); } if (n != max_generation) { for (int gen_number = (n + 1); gen_number < total_generation_count; gen_number++) { get_gc_data_per_heap()->gen_data[gen_number].size_after = generation_size (gen_number); get_gc_data_per_heap()->gen_data[gen_number].free_list_space_after = generation_free_list_space (generation_of (gen_number)); get_gc_data_per_heap()->gen_data[gen_number].free_obj_space_after = generation_free_obj_space (generation_of (gen_number)); } } get_gc_data_per_heap()->maxgen_size_info.running_free_list_efficiency = (uint32_t)(generation_allocator_efficiency (generation_of (max_generation)) * 100); free_list_info (max_generation, "after computing new dynamic data"); } if (n < max_generation) { int highest_gen_number = #ifdef USE_REGIONS max_generation; #else //USE_REGIONS 1 + n; #endif //USE_REGIONS for (int older_gen_idx = (1 + n); older_gen_idx <= highest_gen_number; older_gen_idx++) { compute_promoted_allocation (older_gen_idx); dynamic_data* dd = dynamic_data_of (older_gen_idx); size_t new_fragmentation = generation_free_list_space (generation_of (older_gen_idx)) + generation_free_obj_space (generation_of (older_gen_idx)); #ifdef BACKGROUND_GC if (current_c_gc_state != c_gc_state_planning) #endif //BACKGROUND_GC { if (settings.promotion) { dd_fragmentation (dd) = new_fragmentation; } else { //assert (dd_fragmentation (dd) == new_fragmentation); } } } } #ifdef BACKGROUND_GC if (!settings.concurrent) #endif //BACKGROUND_GC { #ifndef FEATURE_REDHAWK // GCToEEInterface::IsGCThread() always returns false on CoreRT, but this assert is useful in CoreCLR. 
assert(GCToEEInterface::IsGCThread()); #endif // FEATURE_REDHAWK adjust_ephemeral_limits(); } #if defined(BACKGROUND_GC) && !defined(USE_REGIONS) assert (ephemeral_low == generation_allocation_start (generation_of ( max_generation -1))); assert (ephemeral_high == heap_segment_reserved (ephemeral_heap_segment)); #endif //BACKGROUND_GC && !USE_REGIONS if (fgn_maxgen_percent) { if (settings.condemned_generation == (max_generation - 1)) { check_for_full_gc (max_generation - 1, 0); } else if (settings.condemned_generation == max_generation) { if (full_gc_approach_event_set #ifdef MULTIPLE_HEAPS && (heap_number == 0) #endif //MULTIPLE_HEAPS ) { dprintf (2, ("FGN-GC: setting gen2 end event")); full_gc_approach_event.Reset(); #ifdef BACKGROUND_GC // By definition WaitForFullGCComplete only succeeds if it's full, *blocking* GC, otherwise need to return N/A fgn_last_gc_was_concurrent = settings.concurrent ? TRUE : FALSE; #endif //BACKGROUND_GC full_gc_end_event.Set(); full_gc_approach_event_set = false; } } } #ifdef BACKGROUND_GC if (!settings.concurrent) #endif //BACKGROUND_GC { //decide on the next allocation quantum if (alloc_contexts_used >= 1) { allocation_quantum = Align (min ((size_t)CLR_SIZE, (size_t)max (1024, get_new_allocation (0) / (2 * alloc_contexts_used))), get_alignment_constant(FALSE)); dprintf (3, ("New allocation quantum: %d(0x%Ix)", allocation_quantum, allocation_quantum)); } } descr_generations ("END"); verify_soh_segment_list(); #ifdef BACKGROUND_GC if (gc_can_use_concurrent) { check_bgc_mark_stack_length(); } assert (settings.concurrent == (uint32_t)(bgc_thread_id.IsCurrentThread())); #endif //BACKGROUND_GC #if defined(VERIFY_HEAP) || (defined (FEATURE_EVENT_TRACE) && defined(BACKGROUND_GC)) if (FALSE #ifdef VERIFY_HEAP // Note that right now g_pConfig->GetHeapVerifyLevel always returns the same // value. If we ever allow randomly adjusting this as the process runs, // we cannot call it this way as joins need to match - we must have the same // value for all heaps like we do with bgc_heap_walk_for_etw_p. || (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) #endif #if defined(FEATURE_EVENT_TRACE) && defined(BACKGROUND_GC) || (bgc_heap_walk_for_etw_p && settings.concurrent) #endif ) { #ifdef BACKGROUND_GC bool cooperative_mode = true; if (settings.concurrent) { cooperative_mode = enable_preemptive (); #ifdef MULTIPLE_HEAPS bgc_t_join.join(this, gc_join_suspend_ee_verify); if (bgc_t_join.joined()) { bgc_threads_sync_event.Reset(); dprintf(2, ("Joining BGC threads to suspend EE for verify heap")); bgc_t_join.restart(); } if (heap_number == 0) { // need to take the gc_lock in preparation for verify_heap below // *before* we suspend the EE, otherwise we get a deadlock enter_gc_lock_for_verify_heap(); suspend_EE(); bgc_threads_sync_event.Set(); } else { bgc_threads_sync_event.Wait(INFINITE, FALSE); dprintf (2, ("bgc_threads_sync_event is signalled")); } #else //MULTIPLE_HEAPS // need to take the gc_lock in preparation for verify_heap below // *before* we suspend the EE, otherwise we get a deadlock enter_gc_lock_for_verify_heap(); suspend_EE(); #endif //MULTIPLE_HEAPS //fix the allocation area so verify_heap can proceed. 
fix_allocation_contexts (FALSE); } #endif //BACKGROUND_GC #ifdef BACKGROUND_GC assert (settings.concurrent == (uint32_t)(bgc_thread_id.IsCurrentThread())); #ifdef FEATURE_EVENT_TRACE if (bgc_heap_walk_for_etw_p && settings.concurrent) { GCToEEInterface::DiagWalkBGCSurvivors(__this); #ifdef MULTIPLE_HEAPS bgc_t_join.join(this, gc_join_after_profiler_heap_walk); if (bgc_t_join.joined()) { bgc_t_join.restart(); } #endif // MULTIPLE_HEAPS } #endif // FEATURE_EVENT_TRACE #endif //BACKGROUND_GC #ifdef VERIFY_HEAP if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) verify_heap (FALSE); #endif // VERIFY_HEAP #ifdef BACKGROUND_GC if (settings.concurrent) { repair_allocation_contexts (TRUE); #ifdef MULTIPLE_HEAPS bgc_t_join.join(this, gc_join_restart_ee_verify); if (bgc_t_join.joined()) { bgc_threads_sync_event.Reset(); dprintf(2, ("Joining BGC threads to restart EE after verify heap")); bgc_t_join.restart(); } if (heap_number == 0) { restart_EE(); leave_gc_lock_for_verify_heap(); bgc_threads_sync_event.Set(); } else { bgc_threads_sync_event.Wait(INFINITE, FALSE); dprintf (2, ("bgc_threads_sync_event is signalled")); } #else //MULTIPLE_HEAPS restart_EE(); leave_gc_lock_for_verify_heap(); #endif //MULTIPLE_HEAPS disable_preemptive (cooperative_mode); } #endif //BACKGROUND_GC } #endif //VERIFY_HEAP || (FEATURE_EVENT_TRACE && BACKGROUND_GC) #ifdef MULTIPLE_HEAPS if (!settings.concurrent) { gc_t_join.join(this, gc_join_done); if (gc_t_join.joined ()) { gc_heap::internal_gc_done = false; //equalize the new desired size of the generations int limit = settings.condemned_generation; if (limit == max_generation) { limit = total_generation_count-1; } for (int gen = 0; gen <= limit; gen++) { size_t total_desired = 0; size_t total_already_consumed = 0; for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; dynamic_data* dd = hp->dynamic_data_of (gen); size_t temp_total_desired = total_desired + dd_desired_allocation (dd); if (temp_total_desired < total_desired) { // we overflowed. total_desired = (size_t)MAX_PTR; break; } total_desired = temp_total_desired; // for gen 1 and gen 2, there may have been some incoming size // already accounted for assert ((ptrdiff_t)dd_desired_allocation (dd) >= dd_new_allocation (dd)); size_t already_consumed = dd_desired_allocation (dd) - dd_new_allocation (dd); size_t temp_total_already_consumed = total_already_consumed + already_consumed; // we should never have an overflow here as the consumed size should always fit in a size_t assert (temp_total_already_consumed >= total_already_consumed); total_already_consumed = temp_total_already_consumed; } size_t desired_per_heap = Align (total_desired/gc_heap::n_heaps, get_alignment_constant (gen <= max_generation)); size_t already_consumed_per_heap = total_already_consumed / gc_heap::n_heaps; if (gen == 0) { #if 1 //subsumed by the linear allocation model // to avoid spikes in mem usage due to short terms fluctuations in survivorship, // apply some smoothing. 
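// Rough worked example of the smoothing applied by exponential_smoothing (using the formula in
// that helper, with the usual smoothing factor of min (3, collection_count) == 3): if the previous
// smoothed gen0 budget was 90MB and this GC computed a fresh desired_per_heap of 30MB, the new
// value is 30/3 + (90/3)*2 = 70MB, i.e. a sudden drop (or spike) only moves the budget about a
// third of the way per GC. The numbers here are made up purely for illustration.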
desired_per_heap = exponential_smoothing (gen, dd_collection_count (dynamic_data_of(gen)), desired_per_heap); #endif //0 if (!heap_hard_limit) { // if desired_per_heap is close to min_gc_size, trim it // down to min_gc_size to stay in the cache gc_heap* hp = gc_heap::g_heaps[0]; dynamic_data* dd = hp->dynamic_data_of (gen); size_t min_gc_size = dd_min_size(dd); // if min GC size larger than true on die cache, then don't bother // limiting the desired size if ((min_gc_size <= GCToOSInterface::GetCacheSizePerLogicalCpu(TRUE)) && desired_per_heap <= 2*min_gc_size) { desired_per_heap = min_gc_size; } } #ifdef HOST_64BIT desired_per_heap = joined_youngest_desired (desired_per_heap); dprintf (2, ("final gen0 new_alloc: %Id", desired_per_heap)); #endif // HOST_64BIT gc_data_global.final_youngest_desired = desired_per_heap; } #if 1 //subsumed by the linear allocation model if (gen >= uoh_start_generation) { // to avoid spikes in mem usage due to short terms fluctuations in survivorship, // apply some smoothing. desired_per_heap = exponential_smoothing (gen, dd_collection_count (dynamic_data_of (max_generation)), desired_per_heap); } #endif //0 for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; dynamic_data* dd = hp->dynamic_data_of (gen); dd_desired_allocation (dd) = desired_per_heap; dd_gc_new_allocation (dd) = desired_per_heap; dd_new_allocation (dd) = desired_per_heap - already_consumed_per_heap; if (gen == 0) { hp->fgn_last_alloc = desired_per_heap; } } } #ifdef FEATURE_LOH_COMPACTION BOOL all_heaps_compacted_p = TRUE; #endif //FEATURE_LOH_COMPACTION int max_gen0_must_clear_bricks = 0; for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; hp->decommit_ephemeral_segment_pages(); hp->rearrange_uoh_segments(); #ifdef FEATURE_LOH_COMPACTION all_heaps_compacted_p &= hp->loh_compacted_p; #endif //FEATURE_LOH_COMPACTION // compute max of gen0_must_clear_bricks over all heaps max_gen0_must_clear_bricks = max(max_gen0_must_clear_bricks, hp->gen0_must_clear_bricks); } #ifdef USE_REGIONS distribute_free_regions(); #endif //USE_REGIONS #ifdef FEATURE_LOH_COMPACTION check_loh_compact_mode (all_heaps_compacted_p); #endif //FEATURE_LOH_COMPACTION // if max_gen0_must_clear_bricks > 0, distribute to all heaps - // if one heap encountered an interior pointer during this GC, // the next GC might see one on another heap if (max_gen0_must_clear_bricks > 0) { for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; hp->gen0_must_clear_bricks = max_gen0_must_clear_bricks; } } for (int i = 0; i < gc_heap::n_heaps; i++) { g_heaps[i]->descr_generations ("END"); #ifdef USE_REGIONS if (settings.condemned_generation == max_generation) { // age and print all kinds of free regions region_free_list::age_free_regions (g_heaps[i]->free_regions); region_free_list::print (g_heaps[i]->free_regions, i, "END"); } else { // age and print only basic free regions g_heaps[i]->free_regions[basic_free_region].age_free_regions(); g_heaps[i]->free_regions[basic_free_region].print (i, "END"); } #endif //USE_REGIONS } fire_pevents(); update_end_ngc_time(); pm_full_gc_init_or_clear(); gc_t_join.restart(); } update_end_gc_time_per_heap(); add_to_history_per_heap(); alloc_context_count = 0; heap_select::mark_heap (heap_number); } #else //MULTIPLE_HEAPS gc_data_global.final_youngest_desired = dd_desired_allocation (dynamic_data_of (0)); #ifdef FEATURE_LOH_COMPACTION check_loh_compact_mode (loh_compacted_p); #endif //FEATURE_LOH_COMPACTION 
decommit_ephemeral_segment_pages(); fire_pevents(); if (!(settings.concurrent)) { #ifdef USE_REGIONS distribute_free_regions(); if (settings.condemned_generation == max_generation) { // age and print all kinds of free regions region_free_list::age_free_regions(free_regions); region_free_list::print(free_regions, 0, "END"); } else { // age and print only basic free regions free_regions[basic_free_region].age_free_regions(); free_regions[basic_free_region].print (0, "END"); } #endif //USE_REGIONS rearrange_uoh_segments(); update_end_ngc_time(); update_end_gc_time_per_heap(); add_to_history_per_heap(); do_post_gc(); } pm_full_gc_init_or_clear(); #ifdef BACKGROUND_GC recover_bgc_settings(); #endif //BACKGROUND_GC #endif //MULTIPLE_HEAPS } void gc_heap::save_data_for_no_gc() { current_no_gc_region_info.saved_pause_mode = settings.pause_mode; #ifdef MULTIPLE_HEAPS // This is to affect heap balancing. for (int i = 0; i < n_heaps; i++) { current_no_gc_region_info.saved_gen0_min_size = dd_min_size (g_heaps[i]->dynamic_data_of (0)); dd_min_size (g_heaps[i]->dynamic_data_of (0)) = min_balance_threshold; current_no_gc_region_info.saved_gen3_min_size = dd_min_size (g_heaps[i]->dynamic_data_of (loh_generation)); dd_min_size (g_heaps[i]->dynamic_data_of (loh_generation)) = 0; } #endif //MULTIPLE_HEAPS } void gc_heap::restore_data_for_no_gc() { gc_heap::settings.pause_mode = current_no_gc_region_info.saved_pause_mode; #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { dd_min_size (g_heaps[i]->dynamic_data_of (0)) = current_no_gc_region_info.saved_gen0_min_size; dd_min_size (g_heaps[i]->dynamic_data_of (loh_generation)) = current_no_gc_region_info.saved_gen3_min_size; } #endif //MULTIPLE_HEAPS } start_no_gc_region_status gc_heap::prepare_for_no_gc_region (uint64_t total_size, BOOL loh_size_known, uint64_t loh_size, BOOL disallow_full_blocking) { if (current_no_gc_region_info.started) { return start_no_gc_in_progress; } start_no_gc_region_status status = start_no_gc_success; save_data_for_no_gc(); settings.pause_mode = pause_no_gc; current_no_gc_region_info.start_status = start_no_gc_success; uint64_t allocation_no_gc_loh = 0; uint64_t allocation_no_gc_soh = 0; assert(total_size != 0); if (loh_size_known) { assert(loh_size != 0); assert(loh_size <= total_size); allocation_no_gc_loh = loh_size; allocation_no_gc_soh = total_size - loh_size; } else { allocation_no_gc_soh = total_size; allocation_no_gc_loh = total_size; } int soh_align_const = get_alignment_constant (TRUE); #ifdef USE_REGIONS size_t max_soh_allocated = SIZE_T_MAX; #else size_t max_soh_allocated = soh_segment_size - segment_info_size - eph_gen_starts_size; #endif size_t size_per_heap = 0; const double scale_factor = 1.05; int num_heaps = get_num_heaps(); uint64_t total_allowed_soh_allocation = (uint64_t)max_soh_allocated * num_heaps; // [LOCALGC TODO] // In theory, the upper limit here is the physical memory of the machine, not // SIZE_T_MAX. This is not true today because total_physical_mem can be // larger than SIZE_T_MAX if running in wow64 on a machine with more than // 4GB of RAM. Once Local GC code divergence is resolved and code is flowing // more freely between branches, it would be good to clean this up to use // total_physical_mem instead of SIZE_T_MAX. assert(total_allowed_soh_allocation <= SIZE_T_MAX); uint64_t total_allowed_loh_allocation = SIZE_T_MAX; uint64_t total_allowed_soh_alloc_scaled = allocation_no_gc_soh > 0 ? 
static_cast<uint64_t>(total_allowed_soh_allocation / scale_factor) : 0; uint64_t total_allowed_loh_alloc_scaled = allocation_no_gc_loh > 0 ? static_cast<uint64_t>(total_allowed_loh_allocation / scale_factor) : 0; if (allocation_no_gc_soh > total_allowed_soh_alloc_scaled || allocation_no_gc_loh > total_allowed_loh_alloc_scaled) { status = start_no_gc_too_large; goto done; } if (allocation_no_gc_soh > 0) { allocation_no_gc_soh = static_cast<uint64_t>(allocation_no_gc_soh * scale_factor); allocation_no_gc_soh = min (allocation_no_gc_soh, total_allowed_soh_alloc_scaled); } if (allocation_no_gc_loh > 0) { allocation_no_gc_loh = static_cast<uint64_t>(allocation_no_gc_loh * scale_factor); allocation_no_gc_loh = min (allocation_no_gc_loh, total_allowed_loh_alloc_scaled); } if (disallow_full_blocking) current_no_gc_region_info.minimal_gc_p = TRUE; if (allocation_no_gc_soh != 0) { current_no_gc_region_info.soh_allocation_size = (size_t)allocation_no_gc_soh; size_per_heap = current_no_gc_region_info.soh_allocation_size; #ifdef MULTIPLE_HEAPS size_per_heap /= n_heaps; for (int i = 0; i < n_heaps; i++) { // due to heap balancing we need to allow some room before we even look to balance to another heap. g_heaps[i]->soh_allocation_no_gc = min (Align ((size_per_heap + min_balance_threshold), soh_align_const), max_soh_allocated); } #else //MULTIPLE_HEAPS soh_allocation_no_gc = min (Align (size_per_heap, soh_align_const), max_soh_allocated); #endif //MULTIPLE_HEAPS } if (allocation_no_gc_loh != 0) { current_no_gc_region_info.loh_allocation_size = (size_t)allocation_no_gc_loh; size_per_heap = current_no_gc_region_info.loh_allocation_size; #ifdef MULTIPLE_HEAPS size_per_heap /= n_heaps; for (int i = 0; i < n_heaps; i++) g_heaps[i]->loh_allocation_no_gc = Align (size_per_heap, get_alignment_constant (FALSE)); #else //MULTIPLE_HEAPS loh_allocation_no_gc = Align (size_per_heap, get_alignment_constant (FALSE)); #endif //MULTIPLE_HEAPS } done: if (status != start_no_gc_success) restore_data_for_no_gc(); return status; } void gc_heap::handle_failure_for_no_gc() { gc_heap::restore_data_for_no_gc(); // sets current_no_gc_region_info.started to FALSE here. 
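    // (note: it is the memset below, which zero-fills the whole struct, that clears started /
    // start_status; restore_data_for_no_gc above only puts back the saved pause mode and the
    // saved gen0 / gen3 minimum sizes.)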
memset (&current_no_gc_region_info, 0, sizeof (current_no_gc_region_info)); } start_no_gc_region_status gc_heap::get_start_no_gc_region_status() { return current_no_gc_region_info.start_status; } void gc_heap::record_gcs_during_no_gc() { if (current_no_gc_region_info.started) { current_no_gc_region_info.num_gcs++; if (is_induced (settings.reason)) current_no_gc_region_info.num_gcs_induced++; } } BOOL gc_heap::find_loh_free_for_no_gc() { allocator* loh_allocator = generation_allocator (generation_of (loh_generation)); size_t size = loh_allocation_no_gc; for (unsigned int a_l_idx = loh_allocator->first_suitable_bucket(size); a_l_idx < loh_allocator->number_of_buckets(); a_l_idx++) { uint8_t* free_list = loh_allocator->alloc_list_head_of (a_l_idx); while (free_list) { size_t free_list_size = unused_array_size(free_list); if (free_list_size > size) { dprintf (3, ("free item %Ix(%Id) for no gc", (size_t)free_list, free_list_size)); return TRUE; } free_list = free_list_slot (free_list); } } return FALSE; } BOOL gc_heap::find_loh_space_for_no_gc() { saved_loh_segment_no_gc = 0; if (find_loh_free_for_no_gc()) return TRUE; heap_segment* seg = generation_allocation_segment (generation_of (loh_generation)); while (seg) { size_t remaining = heap_segment_reserved (seg) - heap_segment_allocated (seg); if (remaining >= loh_allocation_no_gc) { saved_loh_segment_no_gc = seg; break; } seg = heap_segment_next (seg); } if (!saved_loh_segment_no_gc && current_no_gc_region_info.minimal_gc_p) { // If no full GC is allowed, we try to get a new seg right away. saved_loh_segment_no_gc = get_segment_for_uoh (loh_generation, get_uoh_seg_size (loh_allocation_no_gc) #ifdef MULTIPLE_HEAPS , this #endif //MULTIPLE_HEAPS ); } return (saved_loh_segment_no_gc != 0); } BOOL gc_heap::loh_allocated_for_no_gc() { if (!saved_loh_segment_no_gc) return FALSE; heap_segment* seg = generation_allocation_segment (generation_of (loh_generation)); do { if (seg == saved_loh_segment_no_gc) { return FALSE; } seg = heap_segment_next (seg); } while (seg); return TRUE; } BOOL gc_heap::commit_loh_for_no_gc (heap_segment* seg) { uint8_t* end_committed = heap_segment_allocated (seg) + loh_allocation_no_gc; assert (end_committed <= heap_segment_reserved (seg)); return (grow_heap_segment (seg, end_committed)); } void gc_heap::thread_no_gc_loh_segments() { #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { gc_heap* hp = g_heaps[i]; if (hp->loh_allocated_for_no_gc()) { hp->thread_uoh_segment (loh_generation, hp->saved_loh_segment_no_gc); hp->saved_loh_segment_no_gc = 0; } } #else //MULTIPLE_HEAPS if (loh_allocated_for_no_gc()) { thread_uoh_segment (loh_generation, saved_loh_segment_no_gc); saved_loh_segment_no_gc = 0; } #endif //MULTIPLE_HEAPS } void gc_heap::set_loh_allocations_for_no_gc() { if (current_no_gc_region_info.loh_allocation_size != 0) { dynamic_data* dd = dynamic_data_of (loh_generation); dd_new_allocation (dd) = loh_allocation_no_gc; dd_gc_new_allocation (dd) = dd_new_allocation (dd); } } void gc_heap::set_soh_allocations_for_no_gc() { if (current_no_gc_region_info.soh_allocation_size != 0) { dynamic_data* dd = dynamic_data_of (0); dd_new_allocation (dd) = soh_allocation_no_gc; dd_gc_new_allocation (dd) = dd_new_allocation (dd); #ifdef MULTIPLE_HEAPS alloc_context_count = 0; #endif //MULTIPLE_HEAPS } } void gc_heap::set_allocations_for_no_gc() { #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { gc_heap* hp = g_heaps[i]; hp->set_loh_allocations_for_no_gc(); hp->set_soh_allocations_for_no_gc(); } #else //MULTIPLE_HEAPS 
set_loh_allocations_for_no_gc(); set_soh_allocations_for_no_gc(); #endif //MULTIPLE_HEAPS } BOOL gc_heap::should_proceed_for_no_gc() { BOOL gc_requested = FALSE; BOOL loh_full_gc_requested = FALSE; BOOL soh_full_gc_requested = FALSE; BOOL no_gc_requested = FALSE; BOOL get_new_loh_segments = FALSE; gc_heap* hp = nullptr; if (current_no_gc_region_info.soh_allocation_size) { #ifdef USE_REGIONS #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { hp = g_heaps[i]; #else { hp = pGenGCHeap; #endif //MULTIPLE_HEAPS if (!hp->extend_soh_for_no_gc()) { soh_full_gc_requested = TRUE; #ifdef MULTIPLE_HEAPS break; #endif //MULTIPLE_HEAPS } } #else //USE_REGIONS #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { hp = g_heaps[i]; #else //MULTIPLE_HEAPS { hp = pGenGCHeap; #endif //MULTIPLE_HEAPS size_t reserved_space = heap_segment_reserved (hp->ephemeral_heap_segment) - hp->alloc_allocated; if (reserved_space < hp->soh_allocation_no_gc) { gc_requested = TRUE; #ifdef MULTIPLE_HEAPS break; #endif //MULTIPLE_HEAPS } } if (!gc_requested) { #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { hp = g_heaps[i]; #else //MULTIPLE_HEAPS { hp = pGenGCHeap; #endif //MULTIPLE_HEAPS if (!(hp->grow_heap_segment (hp->ephemeral_heap_segment, (hp->alloc_allocated + hp->soh_allocation_no_gc)))) { soh_full_gc_requested = TRUE; #ifdef MULTIPLE_HEAPS break; #endif //MULTIPLE_HEAPS } } } #endif //USE_REGIONS } if (!current_no_gc_region_info.minimal_gc_p && gc_requested) { soh_full_gc_requested = TRUE; } no_gc_requested = !(soh_full_gc_requested || gc_requested); if (soh_full_gc_requested && current_no_gc_region_info.minimal_gc_p) { current_no_gc_region_info.start_status = start_no_gc_no_memory; goto done; } if (!soh_full_gc_requested && current_no_gc_region_info.loh_allocation_size) { // Check to see if we have enough reserved space. #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { gc_heap* hp = g_heaps[i]; if (!hp->find_loh_space_for_no_gc()) { loh_full_gc_requested = TRUE; break; } } #else //MULTIPLE_HEAPS if (!find_loh_space_for_no_gc()) loh_full_gc_requested = TRUE; #endif //MULTIPLE_HEAPS // Check to see if we have committed space. if (!loh_full_gc_requested) { #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { gc_heap* hp = g_heaps[i]; if (hp->saved_loh_segment_no_gc &&!hp->commit_loh_for_no_gc (hp->saved_loh_segment_no_gc)) { loh_full_gc_requested = TRUE; break; } } #else //MULTIPLE_HEAPS if (saved_loh_segment_no_gc && !commit_loh_for_no_gc (saved_loh_segment_no_gc)) loh_full_gc_requested = TRUE; #endif //MULTIPLE_HEAPS } } if (loh_full_gc_requested || soh_full_gc_requested) { if (current_no_gc_region_info.minimal_gc_p) current_no_gc_region_info.start_status = start_no_gc_no_memory; } no_gc_requested = !(loh_full_gc_requested || soh_full_gc_requested || gc_requested); if (current_no_gc_region_info.start_status == start_no_gc_success) { if (no_gc_requested) set_allocations_for_no_gc(); } done: if ((current_no_gc_region_info.start_status == start_no_gc_success) && !no_gc_requested) return TRUE; else { // We are done with starting the no_gc_region. 
current_no_gc_region_info.started = TRUE; return FALSE; } } end_no_gc_region_status gc_heap::end_no_gc_region() { dprintf (1, ("end no gc called")); end_no_gc_region_status status = end_no_gc_success; if (!(current_no_gc_region_info.started)) status = end_no_gc_not_in_progress; if (current_no_gc_region_info.num_gcs_induced) status = end_no_gc_induced; else if (current_no_gc_region_info.num_gcs) status = end_no_gc_alloc_exceeded; if (settings.pause_mode == pause_no_gc) restore_data_for_no_gc(); // sets current_no_gc_region_info.started to FALSE here. memset (&current_no_gc_region_info, 0, sizeof (current_no_gc_region_info)); return status; } //update counters void gc_heap::update_collection_counts () { dynamic_data* dd0 = dynamic_data_of (0); dd_gc_clock (dd0) += 1; uint64_t now = GetHighPrecisionTimeStamp(); for (int i = 0; i <= settings.condemned_generation;i++) { dynamic_data* dd = dynamic_data_of (i); dd_collection_count (dd)++; //this is needed by the linear allocation model if (i == max_generation) { dd_collection_count (dynamic_data_of (loh_generation))++; dd_collection_count(dynamic_data_of(poh_generation))++; } dd_gc_clock (dd) = dd_gc_clock (dd0); dd_previous_time_clock (dd) = dd_time_clock (dd); dd_time_clock (dd) = now; } } #ifdef USE_REGIONS bool gc_heap::extend_soh_for_no_gc() { size_t required = soh_allocation_no_gc; heap_segment* region = ephemeral_heap_segment; while (true) { uint8_t* allocated = (region == ephemeral_heap_segment) ? alloc_allocated : heap_segment_allocated (region); size_t available = heap_segment_reserved (region) - allocated; size_t commit = min (available, required); if (grow_heap_segment (region, allocated + commit)) { required -= commit; if (required == 0) { break; } region = heap_segment_next (region); if (region == nullptr) { region = get_new_region (0); if (region == nullptr) { break; } else { GCToEEInterface::DiagAddNewRegion( 0, heap_segment_mem (region), heap_segment_allocated (region), heap_segment_reserved (region) ); } } } else { break; } } return (required == 0); } #else BOOL gc_heap::expand_soh_with_minimal_gc() { if ((size_t)(heap_segment_reserved (ephemeral_heap_segment) - heap_segment_allocated (ephemeral_heap_segment)) >= soh_allocation_no_gc) return TRUE; heap_segment* new_seg = soh_get_segment_to_expand(); if (new_seg) { if (g_gc_card_table != card_table) copy_brick_card_table(); settings.promotion = TRUE; settings.demotion = FALSE; ephemeral_promotion = TRUE; int condemned_gen_number = max_generation - 1; int align_const = get_alignment_constant (TRUE); for (int i = 0; i <= condemned_gen_number; i++) { generation* gen = generation_of (i); saved_ephemeral_plan_start[i] = generation_allocation_start (gen); saved_ephemeral_plan_start_size[i] = Align (size (generation_allocation_start (gen)), align_const); } // We do need to clear the bricks here as we are converting a bunch of ephemeral objects to gen2 // and need to make sure that there are no left over bricks from the previous GCs for the space // we just used for gen0 allocation. We will need to go through the bricks for these objects for // ephemeral GCs later. 
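        // Sketch of the loop below: walk every brick covering [generation_allocation_start (gen0),
        // align_on_brick (heap_segment_allocated (ephemeral_heap_segment))) and reset its entry via
        // set_brick (b, -1), so no stale object-start information recorded by earlier ephemeral GCs
        // is left behind for the address range that is now being treated as gen2.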
for (size_t b = brick_of (generation_allocation_start (generation_of (0))); b < brick_of (align_on_brick (heap_segment_allocated (ephemeral_heap_segment))); b++) { set_brick (b, -1); } size_t ephemeral_size = (heap_segment_allocated (ephemeral_heap_segment) - generation_allocation_start (generation_of (max_generation - 1))); heap_segment_next (ephemeral_heap_segment) = new_seg; ephemeral_heap_segment = new_seg; uint8_t* start = heap_segment_mem (ephemeral_heap_segment); for (int i = condemned_gen_number; i >= 0; i--) { size_t gen_start_size = Align (min_obj_size); make_generation (i, ephemeral_heap_segment, start); generation* gen = generation_of (i); generation_plan_allocation_start (gen) = start; generation_plan_allocation_start_size (gen) = gen_start_size; start += gen_start_size; } heap_segment_used (ephemeral_heap_segment) = start - plug_skew; heap_segment_plan_allocated (ephemeral_heap_segment) = start; fix_generation_bounds (condemned_gen_number, generation_of (0)); dd_gc_new_allocation (dynamic_data_of (max_generation)) -= ephemeral_size; dd_new_allocation (dynamic_data_of (max_generation)) = dd_gc_new_allocation (dynamic_data_of (max_generation)); adjust_ephemeral_limits(); return TRUE; } else { return FALSE; } } #endif //USE_REGIONS // Only to be done on the thread that calls restart in a join for server GC // and reset the oom status per heap. void gc_heap::check_and_set_no_gc_oom() { #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { gc_heap* hp = g_heaps[i]; if (hp->no_gc_oom_p) { current_no_gc_region_info.start_status = start_no_gc_no_memory; hp->no_gc_oom_p = false; } } #else if (no_gc_oom_p) { current_no_gc_region_info.start_status = start_no_gc_no_memory; no_gc_oom_p = false; } #endif //MULTIPLE_HEAPS } void gc_heap::allocate_for_no_gc_after_gc() { if (current_no_gc_region_info.minimal_gc_p) repair_allocation_contexts (TRUE); no_gc_oom_p = false; if (current_no_gc_region_info.start_status != start_no_gc_no_memory) { if (current_no_gc_region_info.soh_allocation_size != 0) { #ifdef USE_REGIONS no_gc_oom_p = !extend_soh_for_no_gc(); #else if (((size_t)(heap_segment_reserved (ephemeral_heap_segment) - heap_segment_allocated (ephemeral_heap_segment)) < soh_allocation_no_gc) || (!grow_heap_segment (ephemeral_heap_segment, (heap_segment_allocated (ephemeral_heap_segment) + soh_allocation_no_gc)))) { no_gc_oom_p = true; } #endif //USE_REGIONS #ifdef MULTIPLE_HEAPS gc_t_join.join(this, gc_join_after_commit_soh_no_gc); if (gc_t_join.joined()) #endif //MULTIPLE_HEAPS { check_and_set_no_gc_oom(); #ifdef MULTIPLE_HEAPS gc_t_join.restart(); #endif //MULTIPLE_HEAPS } } if ((current_no_gc_region_info.start_status == start_no_gc_success) && !(current_no_gc_region_info.minimal_gc_p) && (current_no_gc_region_info.loh_allocation_size != 0)) { gc_policy = policy_compact; saved_loh_segment_no_gc = 0; if (!find_loh_free_for_no_gc()) { heap_segment* seg = generation_allocation_segment (generation_of (loh_generation)); BOOL found_seg_p = FALSE; while (seg) { if ((size_t)(heap_segment_reserved (seg) - heap_segment_allocated (seg)) >= loh_allocation_no_gc) { found_seg_p = TRUE; if (!commit_loh_for_no_gc (seg)) { no_gc_oom_p = true; break; } } seg = heap_segment_next (seg); } if (!found_seg_p) gc_policy = policy_expand; } #ifdef MULTIPLE_HEAPS gc_t_join.join(this, gc_join_expand_loh_no_gc); if (gc_t_join.joined()) { check_and_set_no_gc_oom(); if (current_no_gc_region_info.start_status == start_no_gc_success) { for (int i = 0; i < n_heaps; i++) { gc_heap* hp = g_heaps[i]; if (hp->gc_policy == 
policy_expand) { hp->saved_loh_segment_no_gc = get_segment_for_uoh (loh_generation, get_uoh_seg_size (loh_allocation_no_gc), hp); if (!(hp->saved_loh_segment_no_gc)) { current_no_gc_region_info.start_status = start_no_gc_no_memory; break; } } } } gc_t_join.restart(); } #else //MULTIPLE_HEAPS check_and_set_no_gc_oom(); if ((current_no_gc_region_info.start_status == start_no_gc_success) && (gc_policy == policy_expand)) { saved_loh_segment_no_gc = get_segment_for_uoh (loh_generation, get_uoh_seg_size (loh_allocation_no_gc)); if (!saved_loh_segment_no_gc) current_no_gc_region_info.start_status = start_no_gc_no_memory; } #endif //MULTIPLE_HEAPS if ((current_no_gc_region_info.start_status == start_no_gc_success) && saved_loh_segment_no_gc) { if (!commit_loh_for_no_gc (saved_loh_segment_no_gc)) { no_gc_oom_p = true; } } } } #ifdef MULTIPLE_HEAPS gc_t_join.join(this, gc_join_final_no_gc); if (gc_t_join.joined()) #endif //MULTIPLE_HEAPS { check_and_set_no_gc_oom(); if (current_no_gc_region_info.start_status == start_no_gc_success) { set_allocations_for_no_gc(); current_no_gc_region_info.started = TRUE; } #ifdef MULTIPLE_HEAPS gc_t_join.restart(); #endif //MULTIPLE_HEAPS } } void gc_heap::init_records() { // An option is to move this to be after we figure out which gen to condemn so we don't // need to clear some generations' data 'cause we know they don't change, but that also means // we can't simply call memset here. memset (&gc_data_per_heap, 0, sizeof (gc_data_per_heap)); gc_data_per_heap.heap_index = heap_number; if (heap_number == 0) memset (&gc_data_global, 0, sizeof (gc_data_global)); #ifdef GC_CONFIG_DRIVEN memset (interesting_data_per_gc, 0, sizeof (interesting_data_per_gc)); #endif //GC_CONFIG_DRIVEN memset (&fgm_result, 0, sizeof (fgm_result)); for (int i = 0; i < total_generation_count; i++) { gc_data_per_heap.gen_data[i].size_before = generation_size (i); generation* gen = generation_of (i); gc_data_per_heap.gen_data[i].free_list_space_before = generation_free_list_space (gen); gc_data_per_heap.gen_data[i].free_obj_space_before = generation_free_obj_space (gen); } #ifdef USE_REGIONS end_gen0_region_space = 0; gen0_pinned_free_space = 0; gen0_large_chunk_found = false; num_regions_freed_in_sweep = 0; #endif //USE_REGIONS sufficient_gen0_space_p = FALSE; #ifdef MULTIPLE_HEAPS gen0_allocated_after_gc_p = false; #endif //MULTIPLE_HEAPS #if defined (_DEBUG) && defined (VERIFY_HEAP) verify_pinned_queue_p = FALSE; #endif // _DEBUG && VERIFY_HEAP } void gc_heap::pm_full_gc_init_or_clear() { // This means the next GC will be a full blocking GC and we need to init. if (settings.condemned_generation == (max_generation - 1)) { if (pm_trigger_full_gc) { #ifdef MULTIPLE_HEAPS do_post_gc(); #endif //MULTIPLE_HEAPS dprintf (GTC_LOG, ("init for PM triggered full GC")); uint32_t saved_entry_memory_load = settings.entry_memory_load; settings.init_mechanisms(); settings.reason = reason_pm_full_gc; settings.condemned_generation = max_generation; settings.entry_memory_load = saved_entry_memory_load; // Can't assert this since we only check at the end of gen2 GCs, // during gen1 the memory load could have already dropped. // Although arguably we should just turn off PM then... //assert (settings.entry_memory_load >= high_memory_load_th); assert (settings.entry_memory_load > 0); settings.gc_index += 1; do_pre_gc(); } } // This means we are in the progress of a full blocking GC triggered by // this PM mode. 
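    // (in other words: the gen1 GC that just finished set pm_trigger_full_gc, the block above has
    // already re-initialized `settings` for a reason_pm_full_gc collection of max_generation, and
    // the branch below runs when that full GC itself finishes and simply clears the trigger.)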
else if (settings.reason == reason_pm_full_gc) { assert (settings.condemned_generation == max_generation); assert (pm_trigger_full_gc); pm_trigger_full_gc = false; dprintf (GTC_LOG, ("PM triggered full GC done")); } } void gc_heap::garbage_collect_pm_full_gc() { assert (settings.condemned_generation == max_generation); assert (settings.reason == reason_pm_full_gc); assert (!settings.concurrent); gc1(); } void gc_heap::garbage_collect (int n) { //reset the number of alloc contexts alloc_contexts_used = 0; fix_allocation_contexts (TRUE); #ifdef MULTIPLE_HEAPS #ifdef JOIN_STATS gc_t_join.start_ts(this); #endif //JOIN_STATS check_gen0_bricks(); clear_gen0_bricks(); #endif //MULTIPLE_HEAPS if ((settings.pause_mode == pause_no_gc) && current_no_gc_region_info.minimal_gc_p) { #ifdef MULTIPLE_HEAPS gc_t_join.join(this, gc_join_minimal_gc); if (gc_t_join.joined()) #endif //MULTIPLE_HEAPS { #ifndef USE_REGIONS #ifdef MULTIPLE_HEAPS // this is serialized because we need to get a segment for (int i = 0; i < n_heaps; i++) { if (!(g_heaps[i]->expand_soh_with_minimal_gc())) current_no_gc_region_info.start_status = start_no_gc_no_memory; } #else if (!expand_soh_with_minimal_gc()) current_no_gc_region_info.start_status = start_no_gc_no_memory; #endif //MULTIPLE_HEAPS #endif //!USE_REGIONS update_collection_counts_for_no_gc(); #ifdef MULTIPLE_HEAPS gc_start_event.Reset(); gc_t_join.restart(); #endif //MULTIPLE_HEAPS } goto done; } init_records(); settings.reason = gc_trigger_reason; num_pinned_objects = 0; #ifdef STRESS_HEAP if (settings.reason == reason_gcstress) { settings.reason = reason_induced; settings.stress_induced = TRUE; } #endif // STRESS_HEAP #ifdef MULTIPLE_HEAPS //align all heaps on the max generation to condemn dprintf (3, ("Joining for max generation to condemn")); condemned_generation_num = generation_to_condemn (n, &blocking_collection, &elevation_requested, FALSE); gc_t_join.join(this, gc_join_generation_determined); if (gc_t_join.joined()) #endif //MULTIPLE_HEAPS { #ifdef FEATURE_BASICFREEZE seg_table->delete_old_slots(); #endif //FEATURE_BASICFREEZE #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { gc_heap* hp = g_heaps[i]; // check for card table growth if (g_gc_card_table != hp->card_table) hp->copy_brick_card_table(); hp->rearrange_uoh_segments(); #ifdef BACKGROUND_GC hp->background_delay_delete_uoh_segments(); if (!gc_heap::background_running_p()) hp->rearrange_small_heap_segments(); #endif //BACKGROUND_GC } #else //MULTIPLE_HEAPS if (g_gc_card_table != card_table) copy_brick_card_table(); rearrange_uoh_segments(); #ifdef BACKGROUND_GC background_delay_delete_uoh_segments(); if (!gc_heap::background_running_p()) rearrange_small_heap_segments(); #endif //BACKGROUND_GC #endif //MULTIPLE_HEAPS BOOL should_evaluate_elevation = TRUE; BOOL should_do_blocking_collection = FALSE; #ifdef MULTIPLE_HEAPS int gen_max = condemned_generation_num; for (int i = 0; i < n_heaps; i++) { if (gen_max < g_heaps[i]->condemned_generation_num) gen_max = g_heaps[i]->condemned_generation_num; if (should_evaluate_elevation && !(g_heaps[i]->elevation_requested)) should_evaluate_elevation = FALSE; if ((!should_do_blocking_collection) && (g_heaps[i]->blocking_collection)) should_do_blocking_collection = TRUE; } settings.condemned_generation = gen_max; #else //MULTIPLE_HEAPS settings.condemned_generation = generation_to_condemn (n, &blocking_collection, &elevation_requested, FALSE); should_evaluate_elevation = elevation_requested; should_do_blocking_collection = blocking_collection; #endif //MULTIPLE_HEAPS 
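        // At this point settings.condemned_generation holds the single heap's decision (workstation)
        // or the maximum of the per-heap votes computed above (server); e.g. four heaps voting
        // gen 0, 1, 0, 2 leave gen_max == 2. joined_generation_to_condemn below takes that joined
        // starting point and may still adjust it (elevation, blocking-collection requests) before it
        // becomes the final condemned generation.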
settings.condemned_generation = joined_generation_to_condemn ( should_evaluate_elevation, n, settings.condemned_generation, &should_do_blocking_collection STRESS_HEAP_ARG(n) ); STRESS_LOG1(LF_GCROOTS|LF_GC|LF_GCALLOC, LL_INFO10, "condemned generation num: %d\n", settings.condemned_generation); record_gcs_during_no_gc(); if (settings.condemned_generation > 1) settings.promotion = TRUE; #ifdef HEAP_ANALYZE // At this point we've decided what generation is condemned // See if we've been requested to analyze survivors after the mark phase if (GCToEEInterface::AnalyzeSurvivorsRequested(settings.condemned_generation)) { heap_analyze_enabled = TRUE; } #endif // HEAP_ANALYZE GCToEEInterface::DiagGCStart(settings.condemned_generation, settings.reason == reason_induced); #ifdef BACKGROUND_GC if ((settings.condemned_generation == max_generation) && (should_do_blocking_collection == FALSE) && gc_can_use_concurrent && !temp_disable_concurrent_p && ((settings.pause_mode == pause_interactive) || (settings.pause_mode == pause_sustained_low_latency))) { keep_bgc_threads_p = TRUE; c_write (settings.concurrent, TRUE); memset (&bgc_data_global, 0, sizeof(bgc_data_global)); memcpy (&bgc_data_global, &gc_data_global, sizeof(gc_data_global)); } #endif //BACKGROUND_GC settings.gc_index = (uint32_t)dd_collection_count (dynamic_data_of (0)) + 1; #ifdef MULTIPLE_HEAPS hb_log_balance_activities(); hb_log_new_allocation(); #endif //MULTIPLE_HEAPS // Call the EE for start of GC work GCToEEInterface::GcStartWork (settings.condemned_generation, max_generation); // TODO: we could fire an ETW event to say this GC as a concurrent GC but later on due to not being able to // create threads or whatever, this could be a non concurrent GC. Maybe for concurrent GC we should fire // it in do_background_gc and if it failed to be a CGC we fire it in gc1... in other words, this should be // fired in gc1. do_pre_gc(); #ifdef MULTIPLE_HEAPS gc_start_event.Reset(); dprintf(3, ("Starting all gc threads for gc")); gc_t_join.restart(); #endif //MULTIPLE_HEAPS } descr_generations ("BEGIN"); #if defined(TRACE_GC) && defined(USE_REGIONS) if (heap_number == 0) { #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { gc_heap *hp = g_heaps[i]; #else //MULTIPLE_HEAPS { gc_heap* hp = pGenGCHeap; const int i = 0; #endif //MULTIPLE_HEAPS if (settings.condemned_generation == max_generation) { // print all kinds of free regions region_free_list::print(hp->free_regions, i, "BEGIN"); } else { // print only basic free regions hp->free_regions[basic_free_region].print (i, "BEGIN"); } } } #endif // TRACE_GC && USE_REGIONS #ifdef VERIFY_HEAP if ((GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) && !(GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_POST_GC_ONLY)) { verify_heap (TRUE); } if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_BARRIERCHECK) checkGCWriteBarrier(); #endif // VERIFY_HEAP #ifdef BACKGROUND_GC if (settings.concurrent) { // We need to save the settings because we'll need to restore it after each FGC. 
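        // (an ephemeral foreground gen1 GC may run before the background GC actually starts; its
        // gc1() call further down overwrites `settings`, and the saved copy is written back via
        // `settings = saved_bgc_settings` right before do_background_gc().)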
assert (settings.condemned_generation == max_generation); settings.compaction = FALSE; saved_bgc_settings = settings; #ifdef MULTIPLE_HEAPS if (heap_number == 0) { for (int i = 0; i < n_heaps; i++) { prepare_bgc_thread (g_heaps[i]); } dprintf (2, ("setting bgc_threads_sync_event")); bgc_threads_sync_event.Set(); } else { bgc_threads_sync_event.Wait(INFINITE, FALSE); dprintf (2, ("bgc_threads_sync_event is signalled")); } #else prepare_bgc_thread(0); #endif //MULTIPLE_HEAPS #ifdef MULTIPLE_HEAPS gc_t_join.join(this, gc_join_start_bgc); if (gc_t_join.joined()) #endif //MULTIPLE_HEAPS { do_concurrent_p = TRUE; do_ephemeral_gc_p = FALSE; #ifdef MULTIPLE_HEAPS dprintf(2, ("Joined to perform a background GC")); for (int i = 0; i < n_heaps; i++) { gc_heap* hp = g_heaps[i]; if (!(hp->bgc_thread) || !hp->commit_mark_array_bgc_init()) { do_concurrent_p = FALSE; break; } else { hp->background_saved_lowest_address = hp->lowest_address; hp->background_saved_highest_address = hp->highest_address; } } #else do_concurrent_p = (!!bgc_thread && commit_mark_array_bgc_init()); if (do_concurrent_p) { background_saved_lowest_address = lowest_address; background_saved_highest_address = highest_address; } #endif //MULTIPLE_HEAPS if (do_concurrent_p) { #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP SoftwareWriteWatch::EnableForGCHeap(); #endif //FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) g_heaps[i]->current_bgc_state = bgc_initialized; #else current_bgc_state = bgc_initialized; #endif //MULTIPLE_HEAPS int gen = check_for_ephemeral_alloc(); // always do a gen1 GC before we start BGC. dont_restart_ee_p = TRUE; if (gen == -1) { // If we decide to not do a GC before the BGC we need to // restore the gen0 alloc context. #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { generation_allocation_pointer (g_heaps[i]->generation_of (0)) = 0; generation_allocation_limit (g_heaps[i]->generation_of (0)) = 0; } #else generation_allocation_pointer (youngest_generation) = 0; generation_allocation_limit (youngest_generation) = 0; #endif //MULTIPLE_HEAPS } else { do_ephemeral_gc_p = TRUE; settings.init_mechanisms(); settings.condemned_generation = gen; settings.gc_index = (size_t)dd_collection_count (dynamic_data_of (0)) + 2; do_pre_gc(); // TODO BACKGROUND_GC need to add the profiling stuff here. dprintf (GTC_LOG, ("doing gen%d before doing a bgc", gen)); } //clear the cards so they don't bleed in gen 1 during collection // shouldn't this always be done at the beginning of any GC? //clear_card_for_addresses ( // generation_allocation_start (generation_of (0)), // heap_segment_allocated (ephemeral_heap_segment)); if (!do_ephemeral_gc_p) { do_background_gc(); } } else { settings.compaction = TRUE; c_write (settings.concurrent, FALSE); } #ifdef MULTIPLE_HEAPS gc_t_join.restart(); #endif //MULTIPLE_HEAPS } if (do_concurrent_p) { // At this point we are sure we'll be starting a BGC, so save its per heap data here. // global data is only calculated at the end of the GC so we don't need to worry about // FGCs overwriting it. 
            memset (&bgc_data_per_heap, 0, sizeof (bgc_data_per_heap));
            memcpy (&bgc_data_per_heap, &gc_data_per_heap, sizeof(gc_data_per_heap));

            if (do_ephemeral_gc_p)
            {
                dprintf (2, ("GC threads running, doing gen%d GC", settings.condemned_generation));

                gen_to_condemn_reasons.init();
                gen_to_condemn_reasons.set_condition (gen_before_bgc);
                gc_data_per_heap.gen_to_condemn_reasons.init (&gen_to_condemn_reasons);
                gc1();
#ifdef MULTIPLE_HEAPS
                gc_t_join.join(this, gc_join_bgc_after_ephemeral);
                if (gc_t_join.joined())
#endif //MULTIPLE_HEAPS
                {
#ifdef MULTIPLE_HEAPS
                    do_post_gc();
#endif //MULTIPLE_HEAPS
                    settings = saved_bgc_settings;
                    assert (settings.concurrent);

                    do_background_gc();
#ifdef MULTIPLE_HEAPS
                    gc_t_join.restart();
#endif //MULTIPLE_HEAPS
                }
            }
        }
        else
        {
            dprintf (2, ("couldn't create BGC threads, reverting to doing a blocking GC"));
            gc1();
        }
    }
    else
#endif //BACKGROUND_GC
    {
        gc1();
    }

#ifndef MULTIPLE_HEAPS
    allocation_running_time = (size_t)GCToOSInterface::GetLowPrecisionTimeStamp();
    allocation_running_amount = dd_new_allocation (dynamic_data_of (0));
    fgn_last_alloc = dd_new_allocation (dynamic_data_of (0));
#endif //MULTIPLE_HEAPS

done:
    if (settings.pause_mode == pause_no_gc)
        allocate_for_no_gc_after_gc();
}

#define mark_stack_empty_p() (mark_stack_base == mark_stack_tos)

inline
size_t gc_heap::get_promoted_bytes()
{
#ifdef USE_REGIONS
    if (!survived_per_region)
    {
        dprintf (REGIONS_LOG, ("no space to store promoted bytes"));
        return 0;
    }

    dprintf (3, ("h%d getting surv", heap_number));
    size_t promoted = 0;
    for (size_t i = 0; i < region_count; i++)
    {
        if (survived_per_region[i] > 0)
        {
            heap_segment* region = get_region_at_index (i);
            dprintf (REGIONS_LOG, ("h%d region[%d] %Ix(g%d)(%s) surv: %Id(%Ix)",
                heap_number, i,
                heap_segment_mem (region),
                heap_segment_gen_num (region),
                (heap_segment_loh_p (region) ? "LOH" : (heap_segment_poh_p (region) ? "POH" :"SOH")),
                survived_per_region[i],
                &survived_per_region[i]));
            promoted += survived_per_region[i];
        }
    }

#ifdef _DEBUG
    dprintf (REGIONS_LOG, ("h%d global recorded %Id, regions recorded %Id",
        heap_number, promoted_bytes (heap_number), promoted));
    assert (promoted_bytes (heap_number) == promoted);
#endif //_DEBUG

    return promoted;
#else //USE_REGIONS
#ifdef MULTIPLE_HEAPS
    return g_promoted [heap_number*16];
#else //MULTIPLE_HEAPS
    return g_promoted;
#endif //MULTIPLE_HEAPS
#endif //USE_REGIONS
}

#ifdef USE_REGIONS
void gc_heap::sync_promoted_bytes()
{
    int condemned_gen_number = settings.condemned_generation;
    int highest_gen_number = ((condemned_gen_number == max_generation) ?
                              (total_generation_count - 1) : settings.condemned_generation);
    int stop_gen_idx = get_stop_generation_index (condemned_gen_number);

#ifdef MULTIPLE_HEAPS
    // We gather all the promoted bytes for a region recorded by all threads into that region's survived
    // for plan phase. sort_mark_list will be called shortly and will start using the same storage that
    // the GC threads used to record promoted bytes.
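    // Minimal sketch of the aggregation done by the loop below: for every region of every condemned
    // generation,
    //
    //   heap_segment_survived (region)          = sum over all heaps of survived_per_region[region_index]
    //   heap_segment_old_card_survived (region) = sum over all heaps of old_card_survived_per_region[region_index]
    //
    // so the plan phase sees a single per-region survival number regardless of which heap's mark
    // thread recorded the bytes.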
for (int i = 0; i < n_heaps; i++) { gc_heap* hp = g_heaps[i]; #else //MULTIPLE_HEAPS { gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS for (int gen_idx = highest_gen_number; gen_idx >= stop_gen_idx; gen_idx--) { generation* condemned_gen = hp->generation_of (gen_idx); heap_segment* current_region = heap_segment_rw (generation_start_segment (condemned_gen)); while (current_region) { size_t region_index = get_basic_region_index_for_address (heap_segment_mem (current_region)); #ifdef MULTIPLE_HEAPS size_t total_surv = 0; size_t total_old_card_surv = 0; for (int hp_idx = 0; hp_idx < n_heaps; hp_idx++) { total_surv += g_heaps[hp_idx]->survived_per_region[region_index]; total_old_card_surv += g_heaps[hp_idx]->old_card_survived_per_region[region_index]; } heap_segment_survived (current_region) = (int)total_surv; heap_segment_old_card_survived (current_region) = (int)total_old_card_surv; #else heap_segment_survived (current_region) = (int)(survived_per_region[region_index]); heap_segment_old_card_survived (current_region) = (int)(old_card_survived_per_region[region_index]); #endif //MULTIPLE_HEAPS dprintf (REGIONS_LOG, ("region #%d %Ix surv %Id, old card surv %Id", region_index, heap_segment_mem (current_region), heap_segment_survived (current_region), heap_segment_old_card_survived (current_region))); current_region = heap_segment_next (current_region); } } } } #ifdef MULTIPLE_HEAPS void gc_heap::set_heap_for_contained_basic_regions (heap_segment* region, gc_heap* hp) { uint8_t* region_start = get_region_start (region); uint8_t* region_end = heap_segment_reserved (region); int num_basic_regions = (int)((region_end - region_start) >> min_segment_size_shr); for (int i = 0; i < num_basic_regions; i++) { uint8_t* basic_region_start = region_start + ((size_t)i << min_segment_size_shr); heap_segment* basic_region = get_region_info (basic_region_start); heap_segment_heap (basic_region) = hp; } } heap_segment* gc_heap::unlink_first_rw_region (int gen_idx) { generation* gen = generation_of (gen_idx); heap_segment* prev_region = generation_tail_ro_region (gen); heap_segment* region = nullptr; if (prev_region) { assert (heap_segment_read_only_p (prev_region)); region = heap_segment_next (prev_region); assert (region != nullptr); // don't remove the last region in the generation if (heap_segment_next (region) == nullptr) { assert (region == generation_tail_region (gen)); return nullptr; } heap_segment_next (prev_region) = heap_segment_next (region); } else { region = generation_start_segment (gen); assert (region != nullptr); // don't remove the last region in the generation if (heap_segment_next (region) == nullptr) { assert (region == generation_tail_region (gen)); return nullptr; } generation_start_segment (gen) = heap_segment_next (region); } assert (region != generation_tail_region (gen)); assert (!heap_segment_read_only_p (region)); dprintf (REGIONS_LOG, ("unlink_first_rw_region on heap: %d gen: %d region: %Ix", heap_number, gen_idx, heap_segment_mem (region))); set_heap_for_contained_basic_regions (region, nullptr); return region; } void gc_heap::thread_rw_region_front (int gen_idx, heap_segment* region) { generation* gen = generation_of (gen_idx); assert (!heap_segment_read_only_p (region)); heap_segment* prev_region = generation_tail_ro_region (gen); if (prev_region) { heap_segment_next (region) = heap_segment_next (prev_region); heap_segment_next (prev_region) = region; } else { heap_segment_next (region) = generation_start_segment (gen); generation_start_segment (gen) = region; } dprintf 
(REGIONS_LOG, ("thread_rw_region_front on heap: %d gen: %d region: %Ix", heap_number, gen_idx, heap_segment_mem (region))); set_heap_for_contained_basic_regions (region, this); } #endif // MULTIPLE_HEAPS void gc_heap::equalize_promoted_bytes() { #ifdef MULTIPLE_HEAPS // algorithm to roughly balance promoted bytes across heaps by moving regions between heaps // goal is just to balance roughly, while keeping computational complexity low // hope is to achieve better work balancing in relocate and compact phases // int condemned_gen_number = settings.condemned_generation; int highest_gen_number = ((condemned_gen_number == max_generation) ? (total_generation_count - 1) : condemned_gen_number); int stop_gen_idx = get_stop_generation_index (condemned_gen_number); for (int gen_idx = highest_gen_number; gen_idx >= stop_gen_idx; gen_idx--) { // step 1: // compute total promoted bytes per gen size_t total_surv = 0; size_t max_surv_per_heap = 0; size_t surv_per_heap[MAX_SUPPORTED_CPUS]; for (int i = 0; i < n_heaps; i++) { surv_per_heap[i] = 0; gc_heap* hp = g_heaps[i]; generation* condemned_gen = hp->generation_of (gen_idx); heap_segment* current_region = heap_segment_rw (generation_start_segment (condemned_gen)); while (current_region) { total_surv += heap_segment_survived (current_region); surv_per_heap[i] += heap_segment_survived (current_region); current_region = heap_segment_next (current_region); } max_surv_per_heap = max (max_surv_per_heap, surv_per_heap[i]); dprintf (REGIONS_LOG, ("gen: %d heap %d surv: %Id", gen_idx, i, surv_per_heap[i])); } // compute average promoted bytes per heap and per gen // be careful to round up size_t avg_surv_per_heap = (total_surv + n_heaps - 1) / n_heaps; if (avg_surv_per_heap != 0) { dprintf (REGIONS_LOG, ("before equalize: gen: %d avg surv: %Id max_surv: %Id imbalance: %d", gen_idx, avg_surv_per_heap, max_surv_per_heap, max_surv_per_heap*100/avg_surv_per_heap)); } // // step 2: // remove regions from surplus heaps until all heaps are <= average // put removed regions into surplus regions // // step 3: // put regions into size classes by survivorship // put deficit heaps into size classes by deficit // // step 4: // while (surplus regions is non-empty) // get surplus region from biggest size class // put it into heap from biggest deficit size class // re-insert heap by resulting deficit size class heap_segment* surplus_regions = nullptr; size_t max_deficit = 0; size_t max_survived = 0; // go through all the heaps for (int i = 0; i < n_heaps; i++) { // remove regions from this heap until it has average or less survivorship while (surv_per_heap[i] > avg_surv_per_heap) { heap_segment* region = g_heaps[i]->unlink_first_rw_region (gen_idx); if (region == nullptr) { break; } assert (surv_per_heap[i] >= (size_t)heap_segment_survived (region)); dprintf (REGIONS_LOG, ("heap: %d surv: %Id - %Id = %Id", i, surv_per_heap[i], heap_segment_survived (region), surv_per_heap[i] - heap_segment_survived (region))); surv_per_heap[i] -= heap_segment_survived (region); heap_segment_next (region) = surplus_regions; surplus_regions = region; max_survived = max (max_survived, (size_t)heap_segment_survived (region)); } if (surv_per_heap[i] < avg_surv_per_heap) { size_t deficit = avg_surv_per_heap - surv_per_heap[i]; max_deficit = max (max_deficit, deficit); } } // we arrange both surplus regions and deficit heaps by size classes const int NUM_SIZE_CLASSES = 16; heap_segment* surplus_regions_by_size_class[NUM_SIZE_CLASSES]; memset (surplus_regions_by_size_class, 0, 
sizeof(surplus_regions_by_size_class)); double survived_scale_factor = ((double)NUM_SIZE_CLASSES) / (max_survived + 1); heap_segment* next_region; for (heap_segment* region = surplus_regions; region != nullptr; region = next_region) { int size_class = (int)(heap_segment_survived (region)*survived_scale_factor); assert ((0 <= size_class) && (size_class < NUM_SIZE_CLASSES)); next_region = heap_segment_next (region); heap_segment_next (region) = surplus_regions_by_size_class[size_class]; surplus_regions_by_size_class[size_class] = region; } int next_heap_in_size_class[MAX_SUPPORTED_CPUS]; int heaps_by_deficit_size_class[NUM_SIZE_CLASSES]; for (int i = 0; i < NUM_SIZE_CLASSES; i++) { heaps_by_deficit_size_class[i] = -1; } double deficit_scale_factor = ((double)NUM_SIZE_CLASSES) / (max_deficit + 1); for (int i = 0; i < n_heaps; i++) { if (avg_surv_per_heap > surv_per_heap[i]) { size_t deficit = avg_surv_per_heap - surv_per_heap[i]; int size_class = (int)(deficit*deficit_scale_factor); assert ((0 <= size_class) && (size_class < NUM_SIZE_CLASSES)); next_heap_in_size_class[i] = heaps_by_deficit_size_class[size_class]; heaps_by_deficit_size_class[size_class] = i; } } int region_size_class = NUM_SIZE_CLASSES - 1; int heap_size_class = NUM_SIZE_CLASSES - 1; while (region_size_class >= 0) { // obtain a region from the biggest size class heap_segment* region = surplus_regions_by_size_class[region_size_class]; if (region == nullptr) { region_size_class--; continue; } // and a heap from the biggest deficit size class int heap_num; while (true) { if (heap_size_class < 0) { // put any remaining regions on heap 0 // rare case, but there may be some 0 surv size regions heap_num = 0; break; } heap_num = heaps_by_deficit_size_class[heap_size_class]; if (heap_num >= 0) { break; } heap_size_class--; } // now move the region to the heap surplus_regions_by_size_class[region_size_class] = heap_segment_next (region); g_heaps[heap_num]->thread_rw_region_front (gen_idx, region); // adjust survival for this heap dprintf (REGIONS_LOG, ("heap: %d surv: %Id + %Id = %Id", heap_num, surv_per_heap[heap_num], heap_segment_survived (region), surv_per_heap[heap_num] + heap_segment_survived (region))); surv_per_heap[heap_num] += heap_segment_survived (region); if (heap_size_class < 0) { // no need to update size classes for heaps - // just work down the remaining regions, if any continue; } // is this heap now average or above? 
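            // (rough numeric illustration, values made up: with avg_surv_per_heap == 100MB, a heap that
            // was at 80MB and just received a region with 25MB survived is now at 105MB >= 100MB, so it
            // is unlinked from the deficit size classes below and receives no further regions this pass.)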
            if (surv_per_heap[heap_num] >= avg_surv_per_heap)
            {
                // if so, unlink from the current size class
                heaps_by_deficit_size_class[heap_size_class] = next_heap_in_size_class[heap_num];
                continue;
            }

            // otherwise compute the updated deficit
            size_t new_deficit = avg_surv_per_heap - surv_per_heap[heap_num];

            // check if this heap moves to a different deficit size class
            int new_heap_size_class = (int)(new_deficit*deficit_scale_factor);
            if (new_heap_size_class != heap_size_class)
            {
                // the new deficit size class should be smaller and in range
                assert (new_heap_size_class < heap_size_class);
                assert ((0 <= new_heap_size_class) && (new_heap_size_class < NUM_SIZE_CLASSES));

                // if so, unlink from the current size class
                heaps_by_deficit_size_class[heap_size_class] = next_heap_in_size_class[heap_num];

                // and link to the new size class
                next_heap_in_size_class[heap_num] = heaps_by_deficit_size_class[new_heap_size_class];
                heaps_by_deficit_size_class[new_heap_size_class] = heap_num;
            }
        }

        // we will generally be left with some heaps with deficits here, but that's ok

        // check we didn't screw up the data structures
        for (int i = 0; i < n_heaps; i++)
        {
            g_heaps[i]->verify_regions (gen_idx, false);
        }

#ifdef TRACE_GC
        max_surv_per_heap = 0;
        for (int i = 0; i < n_heaps; i++)
        {
            max_surv_per_heap = max (max_surv_per_heap, surv_per_heap[i]);
        }
        if (avg_surv_per_heap != 0)
        {
            dprintf (REGIONS_LOG, ("after equalize: gen: %d avg surv: %Id max_surv: %Id imbalance: %d",
                gen_idx, avg_surv_per_heap, max_surv_per_heap, max_surv_per_heap*100/avg_surv_per_heap));
        }
#endif // TRACE_GC
    }
#endif //MULTIPLE_HEAPS
}
#endif //USE_REGIONS

#if !defined(USE_REGIONS) || defined(_DEBUG)
inline
void gc_heap::init_promoted_bytes()
{
#ifdef MULTIPLE_HEAPS
    g_promoted [heap_number*16] = 0;
#else //MULTIPLE_HEAPS
    g_promoted = 0;
#endif //MULTIPLE_HEAPS
}

size_t& gc_heap::promoted_bytes (int thread)
{
#ifdef MULTIPLE_HEAPS
    return g_promoted [thread*16];
#else //MULTIPLE_HEAPS
    UNREFERENCED_PARAMETER(thread);
    return g_promoted;
#endif //MULTIPLE_HEAPS
}
#endif //!USE_REGIONS || _DEBUG

inline
void gc_heap::add_to_promoted_bytes (uint8_t* object, int thread)
{
    size_t obj_size = size (object);
    add_to_promoted_bytes (object, obj_size, thread);
}

inline
void gc_heap::add_to_promoted_bytes (uint8_t* object, size_t obj_size, int thread)
{
    assert (thread == heap_number);

#ifdef USE_REGIONS
    if (survived_per_region)
    {
        survived_per_region[get_basic_region_index_for_address (object)] += obj_size;
    }
#endif //USE_REGIONS

#if !defined(USE_REGIONS) || defined(_DEBUG)
#ifdef MULTIPLE_HEAPS
    g_promoted [heap_number*16] += obj_size;
#else //MULTIPLE_HEAPS
    g_promoted += obj_size;
#endif //MULTIPLE_HEAPS
#endif //!USE_REGIONS || _DEBUG

#ifdef _DEBUG
    // Verify we keep the 2 recordings in sync.
    //get_promoted_bytes();
#endif //_DEBUG
}

heap_segment* gc_heap::find_segment (uint8_t* interior, BOOL small_segment_only_p)
{
    heap_segment* seg = seg_mapping_table_segment_of (interior);

    if (seg)
    {
        if (small_segment_only_p && heap_segment_uoh_p (seg))
            return 0;
    }

    return seg;
}

#if !defined(_DEBUG) && !defined(__GNUC__)
inline // This causes link errors if global optimization is off
#endif //!_DEBUG && !__GNUC__
gc_heap* gc_heap::heap_of (uint8_t* o)
{
#ifdef MULTIPLE_HEAPS
    if (o == 0)
        return g_heaps [0];
    gc_heap* hp = seg_mapping_table_heap_of (o);
    return (hp ?
hp : g_heaps[0]); #else //MULTIPLE_HEAPS UNREFERENCED_PARAMETER(o); return __this; #endif //MULTIPLE_HEAPS } inline gc_heap* gc_heap::heap_of_gc (uint8_t* o) { #ifdef MULTIPLE_HEAPS if (o == 0) return g_heaps [0]; gc_heap* hp = seg_mapping_table_heap_of_gc (o); return (hp ? hp : g_heaps[0]); #else //MULTIPLE_HEAPS UNREFERENCED_PARAMETER(o); return __this; #endif //MULTIPLE_HEAPS } // will find all heap objects (large and small) // // Callers of this method need to guarantee the interior pointer is within the heap range. // // If you need it to be stricter, eg if you only want to find an object in ephemeral range, // you should make sure interior is within that range before calling this method. uint8_t* gc_heap::find_object (uint8_t* interior) { assert (interior != 0); if (!gen0_bricks_cleared) { #ifdef MULTIPLE_HEAPS assert (!"Should have already been done in server GC"); #endif //MULTIPLE_HEAPS clear_gen0_bricks(); } //indicate that in the future this needs to be done during allocation gen0_must_clear_bricks = FFIND_DECAY; int brick_entry = get_brick_entry(brick_of (interior)); if (brick_entry == 0) { // this is a pointer to a UOH object heap_segment* seg = find_segment (interior, FALSE); if (seg) { #ifdef FEATURE_CONSERVATIVE_GC if (interior >= heap_segment_allocated(seg)) return 0; #endif // If interior falls within the first free object at the beginning of a generation, // we don't have brick entry for it, and we may incorrectly treat it as on large object heap. int align_const = get_alignment_constant (heap_segment_read_only_p (seg) #ifdef FEATURE_CONSERVATIVE_GC || (GCConfig::GetConservativeGC() && !heap_segment_uoh_p (seg)) #endif ); assert (interior < heap_segment_allocated (seg)); uint8_t* o = heap_segment_mem (seg); while (o < heap_segment_allocated (seg)) { uint8_t* next_o = o + Align (size (o), align_const); assert (next_o > o); if ((o <= interior) && (interior < next_o)) return o; o = next_o; } return 0; } else { return 0; } } else { heap_segment* seg = find_segment (interior, TRUE); if (seg) { #ifdef FEATURE_CONSERVATIVE_GC if (interior >= heap_segment_allocated (seg)) return 0; #else assert (interior < heap_segment_allocated (seg)); #endif uint8_t* o = find_first_object (interior, heap_segment_mem (seg)); return o; } else return 0; } } #ifdef MULTIPLE_HEAPS #ifdef GC_CONFIG_DRIVEN #define m_boundary(o) {if (mark_list_index <= mark_list_end) {*mark_list_index = o;mark_list_index++;} else {mark_list_index++;}} #else //GC_CONFIG_DRIVEN #define m_boundary(o) {if (mark_list_index <= mark_list_end) {*mark_list_index = o;mark_list_index++;}} #endif //GC_CONFIG_DRIVEN #define m_boundary_fullgc(o) {} #else //MULTIPLE_HEAPS #ifdef GC_CONFIG_DRIVEN #define m_boundary(o) {if (mark_list_index <= mark_list_end) {*mark_list_index = o;mark_list_index++;} else {mark_list_index++;} if (slow > o) slow = o; if (shigh < o) shigh = o;} #else #define m_boundary(o) {if (mark_list_index <= mark_list_end) {*mark_list_index = o;mark_list_index++;}if (slow > o) slow = o; if (shigh < o) shigh = o;} #endif //GC_CONFIG_DRIVEN #define m_boundary_fullgc(o) {if (slow > o) slow = o; if (shigh < o) shigh = o;} #endif //MULTIPLE_HEAPS #define method_table(o) ((CObjectHeader*)(o))->GetMethodTable() inline BOOL gc_heap::gc_mark1 (uint8_t* o) { BOOL marked = !marked (o); set_marked (o); dprintf (3, ("*%Ix*, newly marked: %d", (size_t)o, marked)); #if defined(USE_REGIONS) && defined(_DEBUG) heap_segment* seg = seg_mapping_table_segment_of (o); if (o > heap_segment_allocated (seg)) { dprintf (REGIONS_LOG, ("%Ix is 
in seg %Ix(%Ix) but beyond alloc %Ix!!", o, (size_t)seg, heap_segment_mem (seg), heap_segment_allocated (seg))); GCToOSInterface::DebugBreak(); } #endif //USE_REGIONS && _DEBUG return marked; } #ifdef USE_REGIONS inline bool is_in_heap_range (uint8_t* o) { #ifdef FEATURE_BASICFREEZE // we may have frozen objects in read only segments // outside of the reserved address range of the gc heap assert (((g_gc_lowest_address <= o) && (o < g_gc_highest_address)) || (o == nullptr) || (ro_segment_lookup (o) != nullptr)); return ((g_gc_lowest_address <= o) && (o < g_gc_highest_address)); #else //FEATURE_BASICFREEZE // without frozen objects, every non-null pointer must be // within the heap assert ((o == nullptr) || (g_gc_lowest_address <= o) && (o < g_gc_highest_address)); return (o != nullptr); #endif //FEATURE_BASICFREEZE } #endif //USE_REGIONS inline BOOL gc_heap::gc_mark (uint8_t* o, uint8_t* low, uint8_t* high, int condemned_gen) { #ifdef USE_REGIONS assert (low == 0); assert (high == 0); if (is_in_heap_range (o)) { BOOL already_marked = marked (o); if (already_marked) return FALSE; if (condemned_gen == max_generation) { set_marked (o); return TRUE; } int gen = get_region_gen_num (o); if (gen <= condemned_gen) { set_marked (o); return TRUE; } } return FALSE; #else //USE_REGIONS assert (condemned_gen == -1); BOOL marked = FALSE; if ((o >= low) && (o < high)) marked = gc_mark1 (o); #ifdef MULTIPLE_HEAPS else if (o) { gc_heap* hp = heap_of_gc (o); assert (hp); if ((o >= hp->gc_low) && (o < hp->gc_high)) marked = gc_mark1 (o); } #ifdef SNOOP_STATS snoop_stat.objects_checked_count++; if (marked) { snoop_stat.objects_marked_count++; } if (!o) { snoop_stat.zero_ref_count++; } #endif //SNOOP_STATS #endif //MULTIPLE_HEAPS return marked; #endif //USE_REGIONS } #ifdef BACKGROUND_GC inline BOOL gc_heap::background_marked (uint8_t* o) { return mark_array_marked (o); } inline BOOL gc_heap::background_mark1 (uint8_t* o) { BOOL to_mark = !mark_array_marked (o); dprintf (3, ("b*%Ix*b(%d)", (size_t)o, (to_mark ? 
1 : 0))); if (to_mark) { mark_array_set_marked (o); dprintf (4, ("n*%Ix*n", (size_t)o)); return TRUE; } else return FALSE; } // TODO: we could consider filtering out NULL's here instead of going to // look for it on other heaps inline BOOL gc_heap::background_mark (uint8_t* o, uint8_t* low, uint8_t* high) { BOOL marked = FALSE; if ((o >= low) && (o < high)) marked = background_mark1 (o); #ifdef MULTIPLE_HEAPS else if (o) { gc_heap* hp = heap_of (o); assert (hp); if ((o >= hp->background_saved_lowest_address) && (o < hp->background_saved_highest_address)) marked = background_mark1 (o); } #endif //MULTIPLE_HEAPS return marked; } #endif //BACKGROUND_GC #define new_start() {if (ppstop <= start) {break;} else {parm = start}} #define ignore_start 0 #define use_start 1 #define go_through_object(mt,o,size,parm,start,start_useful,limit,exp) \ { \ CGCDesc* map = CGCDesc::GetCGCDescFromMT((MethodTable*)(mt)); \ CGCDescSeries* cur = map->GetHighestSeries(); \ ptrdiff_t cnt = (ptrdiff_t) map->GetNumSeries(); \ \ if (cnt >= 0) \ { \ CGCDescSeries* last = map->GetLowestSeries(); \ uint8_t** parm = 0; \ do \ { \ assert (parm <= (uint8_t**)((o) + cur->GetSeriesOffset())); \ parm = (uint8_t**)((o) + cur->GetSeriesOffset()); \ uint8_t** ppstop = \ (uint8_t**)((uint8_t*)parm + cur->GetSeriesSize() + (size));\ if (!start_useful || (uint8_t*)ppstop > (start)) \ { \ if (start_useful && (uint8_t*)parm < (start)) parm = (uint8_t**)(start);\ while (parm < ppstop) \ { \ {exp} \ parm++; \ } \ } \ cur--; \ \ } while (cur >= last); \ } \ else \ { \ /* Handle the repeating case - array of valuetypes */ \ uint8_t** parm = (uint8_t**)((o) + cur->startoffset); \ if (start_useful && start > (uint8_t*)parm) \ { \ ptrdiff_t cs = mt->RawGetComponentSize(); \ parm = (uint8_t**)((uint8_t*)parm + (((start) - (uint8_t*)parm)/cs)*cs); \ } \ while ((uint8_t*)parm < ((o)+(size)-plug_skew)) \ { \ for (ptrdiff_t __i = 0; __i > cnt; __i--) \ { \ HALF_SIZE_T skip = cur->val_serie[__i].skip; \ HALF_SIZE_T nptrs = cur->val_serie[__i].nptrs; \ uint8_t** ppstop = parm + nptrs; \ if (!start_useful || (uint8_t*)ppstop > (start)) \ { \ if (start_useful && (uint8_t*)parm < (start)) parm = (uint8_t**)(start); \ do \ { \ {exp} \ parm++; \ } while (parm < ppstop); \ } \ parm = (uint8_t**)((uint8_t*)ppstop + skip); \ } \ } \ } \ } #define go_through_object_nostart(mt,o,size,parm,exp) {go_through_object(mt,o,size,parm,o,ignore_start,(o + size),exp); } // 1 thing to note about this macro: // 1) you can use *parm safely but in general you don't want to use parm // because for the collectible types it's not an address on the managed heap. #ifndef COLLECTIBLE_CLASS #define go_through_object_cl(mt,o,size,parm,exp) \ { \ if (header(o)->ContainsPointers()) \ { \ go_through_object_nostart(mt,o,size,parm,exp); \ } \ } #else //COLLECTIBLE_CLASS #define go_through_object_cl(mt,o,size,parm,exp) \ { \ if (header(o)->Collectible()) \ { \ uint8_t* class_obj = get_class_object (o); \ uint8_t** parm = &class_obj; \ do {exp} while (false); \ } \ if (header(o)->ContainsPointers()) \ { \ go_through_object_nostart(mt,o,size,parm,exp); \ } \ } #endif //COLLECTIBLE_CLASS // This starts a plug. But mark_stack_tos isn't increased until set_pinned_info is called. void gc_heap::enque_pinned_plug (uint8_t* plug, BOOL save_pre_plug_info_p, uint8_t* last_object_in_last_plug) { if (mark_stack_array_length <= mark_stack_tos) { if (!grow_mark_stack (mark_stack_array, mark_stack_array_length, MARK_STACK_INITIAL_LENGTH)) { // we don't want to continue here due to security // risks. 
This happens very rarely and fixing it in the // way so that we can continue is a bit involved and will // not be done in Dev10. GCToEEInterface::HandleFatalError((unsigned int)CORINFO_EXCEPTION_GC); } } dprintf (3, ("enqueuing P #%Id(%Ix): %Ix. oldest: %Id, LO: %Ix, pre: %d", mark_stack_tos, &mark_stack_array[mark_stack_tos], plug, mark_stack_bos, last_object_in_last_plug, (save_pre_plug_info_p ? 1 : 0))); mark& m = mark_stack_array[mark_stack_tos]; m.first = plug; // Must be set now because if we have a short object we'll need the value of saved_pre_p. m.saved_pre_p = save_pre_plug_info_p; if (save_pre_plug_info_p) { // In the case of short plugs or doubly linked free lists, there may be extra bits // set in the method table pointer. // Clear these bits for the copy saved in saved_pre_plug, but not for the copy // saved in saved_pre_plug_reloc. // This is because we need these bits for compaction, but not for mark & sweep. size_t special_bits = clear_special_bits (last_object_in_last_plug); // now copy the bits over memcpy (&(m.saved_pre_plug), &(((plug_and_gap*)plug)[-1]), sizeof (gap_reloc_pair)); // restore the bits in the original set_special_bits (last_object_in_last_plug, special_bits); memcpy (&(m.saved_pre_plug_reloc), &(((plug_and_gap*)plug)[-1]), sizeof (gap_reloc_pair)); // If the last object in the last plug is too short, it requires special handling. size_t last_obj_size = plug - last_object_in_last_plug; if (last_obj_size < min_pre_pin_obj_size) { record_interesting_data_point (idp_pre_short); #ifdef SHORT_PLUGS if (is_plug_padded (last_object_in_last_plug)) record_interesting_data_point (idp_pre_short_padded); #endif //SHORT_PLUGS dprintf (3, ("encountered a short object %Ix right before pinned plug %Ix!", last_object_in_last_plug, plug)); // Need to set the short bit regardless of having refs or not because we need to // indicate that this object is not walkable. m.set_pre_short(); #ifdef COLLECTIBLE_CLASS if (is_collectible (last_object_in_last_plug)) { m.set_pre_short_collectible(); } #endif //COLLECTIBLE_CLASS if (contain_pointers (last_object_in_last_plug)) { dprintf (3, ("short object: %Ix(%Ix)", last_object_in_last_plug, last_obj_size)); go_through_object_nostart (method_table(last_object_in_last_plug), last_object_in_last_plug, last_obj_size, pval, { size_t gap_offset = (((size_t)pval - (size_t)(plug - sizeof (gap_reloc_pair) - plug_skew))) / sizeof (uint8_t*); dprintf (3, ("member: %Ix->%Ix, %Id ptrs from beginning of gap", (uint8_t*)pval, *pval, gap_offset)); m.set_pre_short_bit (gap_offset); } ); } } } m.saved_post_p = FALSE; } void gc_heap::save_post_plug_info (uint8_t* last_pinned_plug, uint8_t* last_object_in_last_plug, uint8_t* post_plug) { #ifndef _DEBUG UNREFERENCED_PARAMETER(last_pinned_plug); #endif //_DEBUG mark& m = mark_stack_array[mark_stack_tos - 1]; assert (last_pinned_plug == m.first); m.saved_post_plug_info_start = (uint8_t*)&(((plug_and_gap*)post_plug)[-1]); // In the case of short plugs or doubly linked free lists, there may be extra bits // set in the method table pointer. // Clear these bits for the copy saved in saved_post_plug, but not for the copy // saved in saved_post_plug_reloc. // This is because we need these bits for compaction, but not for mark & sweep. 
// Note that currently none of these bits will ever be set in the object saved *after* // a pinned plug - this object is currently pinned along with the pinned object before it size_t special_bits = clear_special_bits (last_object_in_last_plug); memcpy (&(m.saved_post_plug), m.saved_post_plug_info_start, sizeof (gap_reloc_pair)); // restore the bits in the original set_special_bits (last_object_in_last_plug, special_bits); memcpy (&(m.saved_post_plug_reloc), m.saved_post_plug_info_start, sizeof (gap_reloc_pair)); // This is important - we need to clear all bits here except the last one. m.saved_post_p = TRUE; #ifdef _DEBUG m.saved_post_plug_debug.gap = 1; #endif //_DEBUG dprintf (3, ("PP %Ix has NP %Ix right after", last_pinned_plug, post_plug)); size_t last_obj_size = post_plug - last_object_in_last_plug; if (last_obj_size < min_pre_pin_obj_size) { dprintf (3, ("PP %Ix last obj %Ix is too short", last_pinned_plug, last_object_in_last_plug)); record_interesting_data_point (idp_post_short); #ifdef SHORT_PLUGS if (is_plug_padded (last_object_in_last_plug)) record_interesting_data_point (idp_post_short_padded); #endif //SHORT_PLUGS m.set_post_short(); #if defined (_DEBUG) && defined (VERIFY_HEAP) verify_pinned_queue_p = TRUE; #endif // _DEBUG && VERIFY_HEAP #ifdef COLLECTIBLE_CLASS if (is_collectible (last_object_in_last_plug)) { m.set_post_short_collectible(); } #endif //COLLECTIBLE_CLASS if (contain_pointers (last_object_in_last_plug)) { dprintf (3, ("short object: %Ix(%Ix)", last_object_in_last_plug, last_obj_size)); // TODO: since we won't be able to walk this object in relocation, we still need to // take care of collectible assemblies here. go_through_object_nostart (method_table(last_object_in_last_plug), last_object_in_last_plug, last_obj_size, pval, { size_t gap_offset = (((size_t)pval - (size_t)(post_plug - sizeof (gap_reloc_pair) - plug_skew))) / sizeof (uint8_t*); dprintf (3, ("member: %Ix->%Ix, %Id ptrs from beginning of gap", (uint8_t*)pval, *pval, gap_offset)); m.set_post_short_bit (gap_offset); } ); } } } //#define PREFETCH #ifdef PREFETCH __declspec(naked) void __fastcall Prefetch(void* addr) { __asm { PREFETCHT0 [ECX] ret }; } #else //PREFETCH inline void Prefetch (void* addr) { UNREFERENCED_PARAMETER(addr); } #endif //PREFETCH #ifdef MH_SC_MARK inline VOLATILE(uint8_t*)& gc_heap::ref_mark_stack (gc_heap* hp, int index) { return ((VOLATILE(uint8_t*)*)(hp->mark_stack_array))[index]; } #endif //MH_SC_MARK #define stolen 2 #define partial 1 #define partial_object 3 inline uint8_t* ref_from_slot (uint8_t* r) { return (uint8_t*)((size_t)r & ~(stolen | partial)); } inline BOOL stolen_p (uint8_t* r) { return (((size_t)r&2) && !((size_t)r&1)); } inline BOOL ready_p (uint8_t* r) { return ((size_t)r != 1); } inline BOOL partial_p (uint8_t* r) { return (((size_t)r&1) && !((size_t)r&2)); } inline BOOL straight_ref_p (uint8_t* r) { return (!stolen_p (r) && !partial_p (r)); } inline BOOL partial_object_p (uint8_t* r) { return (((size_t)r & partial_object) == partial_object); } inline BOOL ref_p (uint8_t* r) { return (straight_ref_p (r) || partial_object_p (r)); } void gc_heap::mark_object_simple1 (uint8_t* oo, uint8_t* start THREAD_NUMBER_DCL) { SERVER_SC_MARK_VOLATILE(uint8_t*)* mark_stack_tos = (SERVER_SC_MARK_VOLATILE(uint8_t*)*)mark_stack_array; SERVER_SC_MARK_VOLATILE(uint8_t*)* mark_stack_limit = (SERVER_SC_MARK_VOLATILE(uint8_t*)*)&mark_stack_array[mark_stack_array_length]; SERVER_SC_MARK_VOLATILE(uint8_t*)* mark_stack_base = mark_stack_tos; #ifdef SORT_MARK_STACK 
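// A short summary of the slot encoding this function shares with mark_steal (when
// MH_SC_MARK stealing is enabled), derived from the stolen/partial/partial_object tags
// defined above: an untagged value is an object still to be scanned, a value with the
// partial bit set is a resume pointer into a partially scanned parent, a value tagged
// partial_object is that parent itself, and the small constants stolen and 4 mark
// entries another heap has already claimed.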
SERVER_SC_MARK_VOLATILE(uint8_t*)* sorted_tos = mark_stack_base; #endif //SORT_MARK_STACK // If we are doing a full GC we don't use mark list anyway so use m_boundary_fullgc that doesn't // update mark list. BOOL full_p = (settings.condemned_generation == max_generation); int condemned_gen = #ifdef USE_REGIONS settings.condemned_generation; #else -1; #endif //USE_REGIONS assert ((start >= oo) && (start < oo+size(oo))); #ifndef MH_SC_MARK *mark_stack_tos = oo; #endif //!MH_SC_MARK while (1) { #ifdef MULTIPLE_HEAPS #else //MULTIPLE_HEAPS const int thread = 0; #endif //MULTIPLE_HEAPS if (oo && ((size_t)oo != 4)) { size_t s = 0; if (stolen_p (oo)) { --mark_stack_tos; goto next_level; } else if (!partial_p (oo) && ((s = size (oo)) < (partial_size_th*sizeof (uint8_t*)))) { BOOL overflow_p = FALSE; if (mark_stack_tos + (s) /sizeof (uint8_t*) >= (mark_stack_limit - 1)) { size_t num_components = ((method_table(oo))->HasComponentSize() ? ((CObjectHeader*)oo)->GetNumComponents() : 0); if (mark_stack_tos + CGCDesc::GetNumPointers(method_table(oo), s, num_components) >= (mark_stack_limit - 1)) { overflow_p = TRUE; } } if (overflow_p == FALSE) { dprintf(3,("pushing mark for %Ix ", (size_t)oo)); go_through_object_cl (method_table(oo), oo, s, ppslot, { uint8_t* o = *ppslot; Prefetch(o); if (gc_mark (o, gc_low, gc_high, condemned_gen)) { if (full_p) { m_boundary_fullgc (o); } else { m_boundary (o); } add_to_promoted_bytes (o, thread); if (contain_pointers_or_collectible (o)) { *(mark_stack_tos++) = o; } } } ); } else { dprintf(3,("mark stack overflow for object %Ix ", (size_t)oo)); min_overflow_address = min (min_overflow_address, oo); max_overflow_address = max (max_overflow_address, oo); } } else { if (partial_p (oo)) { start = ref_from_slot (oo); oo = ref_from_slot (*(--mark_stack_tos)); dprintf (4, ("oo: %Ix, start: %Ix\n", (size_t)oo, (size_t)start)); assert ((oo < start) && (start < (oo + size (oo)))); } #ifdef COLLECTIBLE_CLASS else { // If there's a class object, push it now. We are guaranteed to have the slot since // we just popped one object off. if (is_collectible (oo)) { uint8_t* class_obj = get_class_object (oo); if (gc_mark (class_obj, gc_low, gc_high, condemned_gen)) { if (full_p) { m_boundary_fullgc (class_obj); } else { m_boundary (class_obj); } add_to_promoted_bytes (class_obj, thread); *(mark_stack_tos++) = class_obj; // The code below expects that the oo is still stored in the stack slot that was // just popped and it "pushes" it back just by incrementing the mark_stack_tos. // But the class_obj has just overwritten that stack slot and so the oo needs to // be stored to the new slot that's pointed to by the mark_stack_tos. 
*mark_stack_tos = oo; } } if (!contain_pointers (oo)) { goto next_level; } } #endif //COLLECTIBLE_CLASS s = size (oo); BOOL overflow_p = FALSE; if (mark_stack_tos + (num_partial_refs + 2) >= mark_stack_limit) { overflow_p = TRUE; } if (overflow_p == FALSE) { dprintf(3,("pushing mark for %Ix ", (size_t)oo)); //push the object and its current SERVER_SC_MARK_VOLATILE(uint8_t*)* place = ++mark_stack_tos; mark_stack_tos++; #ifdef MH_SC_MARK *(place-1) = 0; *(place) = (uint8_t*)partial; #endif //MH_SC_MARK int i = num_partial_refs; uint8_t* ref_to_continue = 0; go_through_object (method_table(oo), oo, s, ppslot, start, use_start, (oo + s), { uint8_t* o = *ppslot; Prefetch(o); if (gc_mark (o, gc_low, gc_high,condemned_gen)) { if (full_p) { m_boundary_fullgc (o); } else { m_boundary (o); } add_to_promoted_bytes (o, thread); if (contain_pointers_or_collectible (o)) { *(mark_stack_tos++) = o; if (--i == 0) { ref_to_continue = (uint8_t*)((size_t)(ppslot+1) | partial); goto more_to_do; } } } } ); //we are finished with this object assert (ref_to_continue == 0); #ifdef MH_SC_MARK assert ((*(place-1)) == (uint8_t*)0); #else //MH_SC_MARK *(place-1) = 0; #endif //MH_SC_MARK *place = 0; // shouldn't we decrease tos by 2 here?? more_to_do: if (ref_to_continue) { //update the start #ifdef MH_SC_MARK assert ((*(place-1)) == (uint8_t*)0); *(place-1) = (uint8_t*)((size_t)oo | partial_object); assert (((*place) == (uint8_t*)1) || ((*place) == (uint8_t*)2)); #endif //MH_SC_MARK *place = ref_to_continue; } } else { dprintf(3,("mark stack overflow for object %Ix ", (size_t)oo)); min_overflow_address = min (min_overflow_address, oo); max_overflow_address = max (max_overflow_address, oo); } } #ifdef SORT_MARK_STACK if (mark_stack_tos > sorted_tos + mark_stack_array_length/8) { rqsort1 (sorted_tos, mark_stack_tos-1); sorted_tos = mark_stack_tos-1; } #endif //SORT_MARK_STACK } next_level: if (!(mark_stack_empty_p())) { oo = *(--mark_stack_tos); start = oo; #ifdef SORT_MARK_STACK sorted_tos = min ((size_t)sorted_tos, (size_t)mark_stack_tos); #endif //SORT_MARK_STACK } else break; } } #ifdef MH_SC_MARK BOOL same_numa_node_p (int hn1, int hn2) { return (heap_select::find_numa_node_from_heap_no (hn1) == heap_select::find_numa_node_from_heap_no (hn2)); } int find_next_buddy_heap (int this_heap_number, int current_buddy, int n_heaps) { int hn = (current_buddy+1)%n_heaps; while (hn != current_buddy) { if ((this_heap_number != hn) && (same_numa_node_p (this_heap_number, hn))) return hn; hn = (hn+1)%n_heaps; } return current_buddy; } void gc_heap::mark_steal() { mark_stack_busy() = 0; //clear the mark stack in the snooping range for (int i = 0; i < max_snoop_level; i++) { ((VOLATILE(uint8_t*)*)(mark_stack_array))[i] = 0; } //pick the next heap as our buddy int thpn = find_next_buddy_heap (heap_number, heap_number, n_heaps); #ifdef SNOOP_STATS dprintf (SNOOP_LOG, ("(GC%d)heap%d: start snooping %d", settings.gc_index, heap_number, (heap_number+1)%n_heaps)); uint32_t begin_tick = GCToOSInterface::GetLowPrecisionTimeStamp(); #endif //SNOOP_STATS int idle_loop_count = 0; int first_not_ready_level = 0; while (1) { gc_heap* hp = g_heaps [thpn]; int level = first_not_ready_level; first_not_ready_level = 0; while (check_next_mark_stack (hp) && (level < (max_snoop_level-1))) { idle_loop_count = 0; #ifdef SNOOP_STATS snoop_stat.busy_count++; dprintf (SNOOP_LOG, ("heap%d: looking at next heap level %d stack contents: %Ix", heap_number, level, (int)((uint8_t**)(hp->mark_stack_array))[level])); #endif //SNOOP_STATS uint8_t* o = 
ref_mark_stack (hp, level); uint8_t* start = o; if (ref_p (o)) { mark_stack_busy() = 1; BOOL success = TRUE; uint8_t* next = (ref_mark_stack (hp, level+1)); if (ref_p (next)) { if (((size_t)o > 4) && !partial_object_p (o)) { //this is a normal object, not a partial mark tuple //success = (Interlocked::CompareExchangePointer (&ref_mark_stack (hp, level), 0, o)==o); success = (Interlocked::CompareExchangePointer (&ref_mark_stack (hp, level), (uint8_t*)4, o)==o); #ifdef SNOOP_STATS snoop_stat.interlocked_count++; if (success) snoop_stat.normal_count++; #endif //SNOOP_STATS } else { //it is a stolen entry, or beginning/ending of a partial mark level++; #ifdef SNOOP_STATS snoop_stat.stolen_or_pm_count++; #endif //SNOOP_STATS success = FALSE; } } else if (stolen_p (next)) { //ignore the stolen guy and go to the next level success = FALSE; level+=2; #ifdef SNOOP_STATS snoop_stat.stolen_entry_count++; #endif //SNOOP_STATS } else { assert (partial_p (next)); start = ref_from_slot (next); //re-read the object o = ref_from_slot (ref_mark_stack (hp, level)); if (o && start) { //steal the object success = (Interlocked::CompareExchangePointer (&ref_mark_stack (hp, level+1), (uint8_t*)stolen, next) == next); #ifdef SNOOP_STATS snoop_stat.interlocked_count++; if (success) { snoop_stat.partial_mark_parent_count++; } #endif //SNOOP_STATS } else { // stack is not ready, or o is completely different from the last time we read from this stack level. // go up 2 levels to steal children or totally unrelated objects. success = FALSE; if (first_not_ready_level == 0) { first_not_ready_level = level; } level+=2; #ifdef SNOOP_STATS snoop_stat.pm_not_ready_count++; #endif //SNOOP_STATS } } if (success) { #ifdef SNOOP_STATS dprintf (SNOOP_LOG, ("heap%d: marking %Ix from %d [%d] tl:%dms", heap_number, (size_t)o, (heap_number+1)%n_heaps, level, (GCToOSInterface::GetLowPrecisionTimeStamp()-begin_tick))); uint32_t start_tick = GCToOSInterface::GetLowPrecisionTimeStamp(); #endif //SNOOP_STATS mark_object_simple1 (o, start, heap_number); #ifdef SNOOP_STATS dprintf (SNOOP_LOG, ("heap%d: done marking %Ix from %d [%d] %dms tl:%dms", heap_number, (size_t)o, (heap_number+1)%n_heaps, level, (GCToOSInterface::GetLowPrecisionTimeStamp()-start_tick),(GCToOSInterface::GetLowPrecisionTimeStamp()-begin_tick))); #endif //SNOOP_STATS mark_stack_busy() = 0; //clear the mark stack in snooping range for (int i = 0; i < max_snoop_level; i++) { if (((uint8_t**)mark_stack_array)[i] != 0) { ((VOLATILE(uint8_t*)*)(mark_stack_array))[i] = 0; #ifdef SNOOP_STATS snoop_stat.stack_bottom_clear_count++; #endif //SNOOP_STATS } } level = 0; } mark_stack_busy() = 0; } else { //slot is either partial or stolen level++; } } if ((first_not_ready_level != 0) && hp->mark_stack_busy()) { continue; } if (!hp->mark_stack_busy()) { first_not_ready_level = 0; idle_loop_count++; if ((idle_loop_count % (6) )==1) { #ifdef SNOOP_STATS snoop_stat.switch_to_thread_count++; #endif //SNOOP_STATS GCToOSInterface::Sleep(1); } int free_count = 1; #ifdef SNOOP_STATS snoop_stat.stack_idle_count++; //dprintf (SNOOP_LOG, ("heap%d: counting idle threads", heap_number)); #endif //SNOOP_STATS for (int hpn = (heap_number+1)%n_heaps; hpn != heap_number;) { if (!((g_heaps [hpn])->mark_stack_busy())) { free_count++; #ifdef SNOOP_STATS dprintf (SNOOP_LOG, ("heap%d: %d idle", heap_number, free_count)); #endif //SNOOP_STATS } else if (same_numa_node_p (hpn, heap_number) || ((idle_loop_count%1000))==999) { thpn = hpn; break; } hpn = (hpn+1)%n_heaps; YieldProcessor(); } if (free_count == 
n_heaps) { break; } } } } inline BOOL gc_heap::check_next_mark_stack (gc_heap* next_heap) { #ifdef SNOOP_STATS snoop_stat.check_level_count++; #endif //SNOOP_STATS return (next_heap->mark_stack_busy()>=1); } #endif //MH_SC_MARK #ifdef SNOOP_STATS void gc_heap::print_snoop_stat() { dprintf (1234, ("%4s | %8s | %8s | %8s | %8s | %8s | %8s | %8s", "heap", "check", "zero", "mark", "stole", "pstack", "nstack", "nonsk")); dprintf (1234, ("%4d | %8d | %8d | %8d | %8d | %8d | %8d | %8d", snoop_stat.heap_index, snoop_stat.objects_checked_count, snoop_stat.zero_ref_count, snoop_stat.objects_marked_count, snoop_stat.stolen_stack_count, snoop_stat.partial_stack_count, snoop_stat.normal_stack_count, snoop_stat.non_stack_count)); dprintf (1234, ("%4s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s", "heap", "level", "busy", "xchg", "pmparent", "s_pm", "stolen", "nready", "clear")); dprintf (1234, ("%4d | %8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d\n", snoop_stat.heap_index, snoop_stat.check_level_count, snoop_stat.busy_count, snoop_stat.interlocked_count, snoop_stat.partial_mark_parent_count, snoop_stat.stolen_or_pm_count, snoop_stat.stolen_entry_count, snoop_stat.pm_not_ready_count, snoop_stat.normal_count, snoop_stat.stack_bottom_clear_count)); printf ("\n%4s | %8s | %8s | %8s | %8s | %8s\n", "heap", "check", "zero", "mark", "idle", "switch"); printf ("%4d | %8d | %8d | %8d | %8d | %8d\n", snoop_stat.heap_index, snoop_stat.objects_checked_count, snoop_stat.zero_ref_count, snoop_stat.objects_marked_count, snoop_stat.stack_idle_count, snoop_stat.switch_to_thread_count); printf ("%4s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s\n", "heap", "level", "busy", "xchg", "pmparent", "s_pm", "stolen", "nready", "normal", "clear"); printf ("%4d | %8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d\n", snoop_stat.heap_index, snoop_stat.check_level_count, snoop_stat.busy_count, snoop_stat.interlocked_count, snoop_stat.partial_mark_parent_count, snoop_stat.stolen_or_pm_count, snoop_stat.stolen_entry_count, snoop_stat.pm_not_ready_count, snoop_stat.normal_count, snoop_stat.stack_bottom_clear_count); } #endif //SNOOP_STATS #ifdef HEAP_ANALYZE void gc_heap::ha_mark_object_simple (uint8_t** po THREAD_NUMBER_DCL) { if (!internal_root_array) { internal_root_array = new (nothrow) uint8_t* [internal_root_array_length]; if (!internal_root_array) { heap_analyze_success = FALSE; } } if (heap_analyze_success && (internal_root_array_length <= internal_root_array_index)) { size_t new_size = 2*internal_root_array_length; uint64_t available_physical = 0; get_memory_info (NULL, &available_physical); if (new_size > (size_t)(available_physical / 10)) { heap_analyze_success = FALSE; } else { uint8_t** tmp = new (nothrow) uint8_t* [new_size]; if (tmp) { memcpy (tmp, internal_root_array, internal_root_array_length*sizeof (uint8_t*)); delete[] internal_root_array; internal_root_array = tmp; internal_root_array_length = new_size; } else { heap_analyze_success = FALSE; } } } if (heap_analyze_success) { PREFIX_ASSUME(internal_root_array_index < internal_root_array_length); uint8_t* ref = (uint8_t*)po; if (!current_obj || !((ref >= current_obj) && (ref < (current_obj + current_obj_size)))) { gc_heap* hp = gc_heap::heap_of (ref); current_obj = hp->find_object (ref); current_obj_size = size (current_obj); internal_root_array[internal_root_array_index] = current_obj; internal_root_array_index++; } } mark_object_simple (po THREAD_NUMBER_ARG); } #endif //HEAP_ANALYZE //this method assumes that *po is in the [low. 
high[ range void gc_heap::mark_object_simple (uint8_t** po THREAD_NUMBER_DCL) { int condemned_gen = #ifdef USE_REGIONS settings.condemned_generation; #else -1; #endif //USE_REGIONS uint8_t* o = *po; #ifndef MULTIPLE_HEAPS const int thread = 0; #endif //MULTIPLE_HEAPS { #ifdef SNOOP_STATS snoop_stat.objects_checked_count++; #endif //SNOOP_STATS if (gc_mark1 (o)) { m_boundary (o); size_t s = size (o); add_to_promoted_bytes (o, s, thread); { go_through_object_cl (method_table(o), o, s, poo, { uint8_t* oo = *poo; if (gc_mark (oo, gc_low, gc_high, condemned_gen)) { m_boundary (oo); add_to_promoted_bytes (oo, thread); if (contain_pointers_or_collectible (oo)) mark_object_simple1 (oo, oo THREAD_NUMBER_ARG); } } ); } } } } inline void gc_heap::mark_object (uint8_t* o THREAD_NUMBER_DCL) { #ifdef USE_REGIONS if (is_in_heap_range (o) && is_in_condemned_gc (o)) { mark_object_simple (&o THREAD_NUMBER_ARG); } #else //USE_REGIONS if ((o >= gc_low) && (o < gc_high)) mark_object_simple (&o THREAD_NUMBER_ARG); #ifdef MULTIPLE_HEAPS else if (o) { gc_heap* hp = heap_of (o); assert (hp); if ((o >= hp->gc_low) && (o < hp->gc_high)) mark_object_simple (&o THREAD_NUMBER_ARG); } #endif //MULTIPLE_HEAPS #endif //USE_REGIONS } #ifdef BACKGROUND_GC #ifdef USE_REGIONS void gc_heap::set_background_overflow_p (uint8_t* oo) { heap_segment* overflow_region = get_region_info_for_address (oo); overflow_region->flags |= heap_segment_flags_overflow; dprintf (3,("setting overflow flag for region %p", heap_segment_mem (overflow_region))); #ifdef MULTIPLE_HEAPS gc_heap* overflow_heap = heap_segment_heap (overflow_region); #else gc_heap* overflow_heap = nullptr; #endif overflow_heap->background_overflow_p = TRUE; } #endif //USE_REGIONS void gc_heap::background_mark_simple1 (uint8_t* oo THREAD_NUMBER_DCL) { uint8_t** mark_stack_limit = &background_mark_stack_array[background_mark_stack_array_length]; #ifdef SORT_MARK_STACK uint8_t** sorted_tos = background_mark_stack_array; #endif //SORT_MARK_STACK background_mark_stack_tos = background_mark_stack_array; while (1) { #ifdef MULTIPLE_HEAPS #else //MULTIPLE_HEAPS const int thread = 0; #endif //MULTIPLE_HEAPS if (oo) { size_t s = 0; if ((((size_t)oo & 1) == 0) && ((s = size (oo)) < (partial_size_th*sizeof (uint8_t*)))) { BOOL overflow_p = FALSE; if (background_mark_stack_tos + (s) /sizeof (uint8_t*) >= (mark_stack_limit - 1)) { size_t num_components = ((method_table(oo))->HasComponentSize() ? 
((CObjectHeader*)oo)->GetNumComponents() : 0); size_t num_pointers = CGCDesc::GetNumPointers(method_table(oo), s, num_components); if (background_mark_stack_tos + num_pointers >= (mark_stack_limit - 1)) { dprintf (2, ("h%d: %Id left, obj (mt: %Ix) %Id ptrs", heap_number, (size_t)(mark_stack_limit - 1 - background_mark_stack_tos), method_table(oo), num_pointers)); bgc_overflow_count++; overflow_p = TRUE; } } if (overflow_p == FALSE) { dprintf(3,("pushing mark for %Ix ", (size_t)oo)); go_through_object_cl (method_table(oo), oo, s, ppslot, { uint8_t* o = *ppslot; Prefetch(o); if (background_mark (o, background_saved_lowest_address, background_saved_highest_address)) { //m_boundary (o); size_t obj_size = size (o); bpromoted_bytes (thread) += obj_size; if (contain_pointers_or_collectible (o)) { *(background_mark_stack_tos++) = o; } } } ); } else { dprintf (3,("background mark stack overflow for object %Ix ", (size_t)oo)); #ifdef USE_REGIONS set_background_overflow_p (oo); #else //USE_REGIONS background_min_overflow_address = min (background_min_overflow_address, oo); background_max_overflow_address = max (background_max_overflow_address, oo); #endif //USE_REGIONS } } else { uint8_t* start = oo; if ((size_t)oo & 1) { oo = (uint8_t*)((size_t)oo & ~1); start = *(--background_mark_stack_tos); dprintf (4, ("oo: %Ix, start: %Ix\n", (size_t)oo, (size_t)start)); } #ifdef COLLECTIBLE_CLASS else { // If there's a class object, push it now. We are guaranteed to have the slot since // we just popped one object off. if (is_collectible (oo)) { uint8_t* class_obj = get_class_object (oo); if (background_mark (class_obj, background_saved_lowest_address, background_saved_highest_address)) { size_t obj_size = size (class_obj); bpromoted_bytes (thread) += obj_size; *(background_mark_stack_tos++) = class_obj; } } if (!contain_pointers (oo)) { goto next_level; } } #endif //COLLECTIBLE_CLASS s = size (oo); BOOL overflow_p = FALSE; if (background_mark_stack_tos + (num_partial_refs + 2) >= mark_stack_limit) { size_t num_components = ((method_table(oo))->HasComponentSize() ? 
((CObjectHeader*)oo)->GetNumComponents() : 0); size_t num_pointers = CGCDesc::GetNumPointers(method_table(oo), s, num_components); dprintf (2, ("h%d: PM: %Id left, obj %Ix (mt: %Ix) start: %Ix, total: %Id", heap_number, (size_t)(mark_stack_limit - background_mark_stack_tos), oo, method_table(oo), start, num_pointers)); bgc_overflow_count++; overflow_p = TRUE; } if (overflow_p == FALSE) { dprintf(3,("pushing mark for %Ix ", (size_t)oo)); //push the object and its current uint8_t** place = background_mark_stack_tos++; *(place) = start; *(background_mark_stack_tos++) = (uint8_t*)((size_t)oo | 1); int num_pushed_refs = num_partial_refs; int num_processed_refs = num_pushed_refs * 16; go_through_object (method_table(oo), oo, s, ppslot, start, use_start, (oo + s), { uint8_t* o = *ppslot; Prefetch(o); if (background_mark (o, background_saved_lowest_address, background_saved_highest_address)) { //m_boundary (o); size_t obj_size = size (o); bpromoted_bytes (thread) += obj_size; if (contain_pointers_or_collectible (o)) { *(background_mark_stack_tos++) = o; if (--num_pushed_refs == 0) { //update the start *place = (uint8_t*)(ppslot+1); goto more_to_do; } } } if (--num_processed_refs == 0) { // give foreground GC a chance to run *place = (uint8_t*)(ppslot + 1); goto more_to_do; } } ); //we are finished with this object *place = 0; *(place+1) = 0; more_to_do:; } else { dprintf (3,("background mark stack overflow for object %Ix ", (size_t)oo)); #ifdef USE_REGIONS set_background_overflow_p (oo); #else //USE_REGIONS background_min_overflow_address = min (background_min_overflow_address, oo); background_max_overflow_address = max (background_max_overflow_address, oo); #endif //USE_REGIONS } } } #ifdef SORT_MARK_STACK if (background_mark_stack_tos > sorted_tos + mark_stack_array_length/8) { rqsort1 (sorted_tos, background_mark_stack_tos-1); sorted_tos = background_mark_stack_tos-1; } #endif //SORT_MARK_STACK #ifdef COLLECTIBLE_CLASS next_level: #endif // COLLECTIBLE_CLASS allow_fgc(); if (!(background_mark_stack_tos == background_mark_stack_array)) { oo = *(--background_mark_stack_tos); #ifdef SORT_MARK_STACK sorted_tos = (uint8_t**)min ((size_t)sorted_tos, (size_t)background_mark_stack_tos); #endif //SORT_MARK_STACK } else break; } assert (background_mark_stack_tos == background_mark_stack_array); } //this version is different than the foreground GC because //it can't keep pointers to the inside of an object //while calling background_mark_simple1. The object could be moved //by an intervening foreground gc. //this method assumes that *po is in the [low. 
high[ range void gc_heap::background_mark_simple (uint8_t* o THREAD_NUMBER_DCL) { #ifdef MULTIPLE_HEAPS #else //MULTIPLE_HEAPS const int thread = 0; #endif //MULTIPLE_HEAPS { dprintf (3, ("bmarking %Ix", o)); if (background_mark1 (o)) { //m_boundary (o); size_t s = size (o); bpromoted_bytes (thread) += s; if (contain_pointers_or_collectible (o)) { background_mark_simple1 (o THREAD_NUMBER_ARG); } } allow_fgc(); } } inline uint8_t* gc_heap::background_mark_object (uint8_t* o THREAD_NUMBER_DCL) { if ((o >= background_saved_lowest_address) && (o < background_saved_highest_address)) { background_mark_simple (o THREAD_NUMBER_ARG); } else { if (o) { dprintf (3, ("or-%Ix", o)); } } return o; } void gc_heap::background_promote (Object** ppObject, ScanContext* sc, uint32_t flags) { UNREFERENCED_PARAMETER(sc); //in order to save space on the array, mark the object, //knowing that it will be visited later assert (settings.concurrent); THREAD_NUMBER_FROM_CONTEXT; #ifndef MULTIPLE_HEAPS const int thread = 0; #endif //!MULTIPLE_HEAPS uint8_t* o = (uint8_t*)*ppObject; if (o == 0) return; #ifdef DEBUG_DestroyedHandleValue // we can race with destroy handle during concurrent scan if (o == (uint8_t*)DEBUG_DestroyedHandleValue) return; #endif //DEBUG_DestroyedHandleValue HEAP_FROM_THREAD; gc_heap* hp = gc_heap::heap_of (o); if ((o < hp->background_saved_lowest_address) || (o >= hp->background_saved_highest_address)) { return; } if (flags & GC_CALL_INTERIOR) { o = hp->find_object (o); if (o == 0) return; } #ifdef FEATURE_CONSERVATIVE_GC // For conservative GC, a value on stack may point to middle of a free object. // In this case, we don't need to promote the pointer. if (GCConfig::GetConservativeGC() && ((CObjectHeader*)o)->IsFree()) { return; } #endif //FEATURE_CONSERVATIVE_GC #ifdef _DEBUG ((CObjectHeader*)o)->Validate(); #endif //_DEBUG //needs to be called before the marking because it is possible for a foreground //gc to take place during the mark and move the object STRESS_LOG3(LF_GC|LF_GCROOTS, LL_INFO1000000, " GCHeap::Promote: Promote GC Root *%p = %p MT = %pT", ppObject, o, o ? ((Object*) o)->GetGCSafeMethodTable() : NULL); hpt->background_mark_simple (o THREAD_NUMBER_ARG); } //used by the ephemeral collection to scan the local background structures //containing references. void gc_heap::scan_background_roots (promote_func* fn, int hn, ScanContext *pSC) { ScanContext sc; if (pSC == 0) pSC = &sc; pSC->thread_number = hn; BOOL relocate_p = (fn == &GCHeap::Relocate); dprintf (3, ("Scanning background mark list")); //scan mark_list size_t mark_list_finger = 0; while (mark_list_finger < c_mark_list_index) { uint8_t** o = &c_mark_list [mark_list_finger]; if (!relocate_p) { // We may not be able to calculate the size during relocate as POPO // may have written over the object. size_t s = size (*o); assert (Align (s) >= Align (min_obj_size)); dprintf(3,("background root %Ix", (size_t)*o)); } (*fn) ((Object**)o, pSC, 0); mark_list_finger++; } //scan the mark stack dprintf (3, ("Scanning background mark stack")); uint8_t** finger = background_mark_stack_array; while (finger < background_mark_stack_tos) { if ((finger + 1) < background_mark_stack_tos) { // We need to check for the partial mark case here. 
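// A partial-mark entry occupies two adjacent background mark stack slots: *finger is
// the resume pointer into the middle of the parent object and *(finger + 1) is the
// parent itself with its low bit set. During relocation the parent slot is updated
// first and the resume pointer is then rebuilt from the saved offset, as done below.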
uint8_t* parent_obj = *(finger + 1); if ((size_t)parent_obj & 1) { uint8_t* place = *finger; size_t place_offset = 0; uint8_t* real_parent_obj = (uint8_t*)((size_t)parent_obj & ~1); if (relocate_p) { *(finger + 1) = real_parent_obj; place_offset = place - real_parent_obj; dprintf(3,("relocating background root %Ix", (size_t)real_parent_obj)); (*fn) ((Object**)(finger + 1), pSC, 0); real_parent_obj = *(finger + 1); *finger = real_parent_obj + place_offset; *(finger + 1) = (uint8_t*)((size_t)real_parent_obj | 1); dprintf(3,("roots changed to %Ix, %Ix", *finger, *(finger + 1))); } else { uint8_t** temp = &real_parent_obj; dprintf(3,("marking background root %Ix", (size_t)real_parent_obj)); (*fn) ((Object**)temp, pSC, 0); } finger += 2; continue; } } dprintf(3,("background root %Ix", (size_t)*finger)); (*fn) ((Object**)finger, pSC, 0); finger++; } } void gc_heap::grow_bgc_mark_stack (size_t new_size) { if ((background_mark_stack_array_length < new_size) && ((new_size - background_mark_stack_array_length) > (background_mark_stack_array_length / 2))) { dprintf (2, ("h%d: ov grow to %Id", heap_number, new_size)); uint8_t** tmp = new (nothrow) uint8_t* [new_size]; if (tmp) { delete [] background_mark_stack_array; background_mark_stack_array = tmp; background_mark_stack_array_length = new_size; background_mark_stack_tos = background_mark_stack_array; } } } void gc_heap::check_bgc_mark_stack_length() { if ((settings.condemned_generation < (max_generation - 1)) || gc_heap::background_running_p()) return; size_t total_heap_size = get_total_heap_size(); if (total_heap_size < ((size_t)4*1024*1024*1024)) return; #ifdef MULTIPLE_HEAPS int total_heaps = n_heaps; #else int total_heaps = 1; #endif //MULTIPLE_HEAPS size_t size_based_on_heap = total_heap_size / (size_t)(100 * 100 * total_heaps * sizeof (uint8_t*)); size_t new_size = max (background_mark_stack_array_length, size_based_on_heap); grow_bgc_mark_stack (new_size); } uint8_t* gc_heap::background_seg_end (heap_segment* seg, BOOL concurrent_p) { #ifndef USE_REGIONS if (concurrent_p && (seg == saved_overflow_ephemeral_seg)) { // for now we stop at where gen1 started when we started processing return background_min_soh_overflow_address; } else #endif //!USE_REGIONS { return heap_segment_allocated (seg); } } uint8_t* gc_heap::background_first_overflow (uint8_t* min_add, heap_segment* seg, BOOL concurrent_p, BOOL small_object_p) { #ifdef USE_REGIONS return heap_segment_mem (seg); #else uint8_t* o = 0; if (small_object_p) { if (in_range_for_segment (min_add, seg)) { // min_add was the beginning of gen1 when we did the concurrent // overflow. Now we could be in a situation where min_add is // actually the same as allocated for that segment (because // we expanded heap), in which case we can not call // find first on this address or we will AV. 
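// Returning min_add in that case is safe because the caller loops while
// (o < background_seg_end (seg, concurrent_p)), so a value at or past
// heap_segment_allocated simply makes it skip this segment.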
if (min_add >= heap_segment_allocated (seg)) { return min_add; } else { if (concurrent_p && ((seg == saved_overflow_ephemeral_seg) && (min_add >= background_min_soh_overflow_address))) { return background_min_soh_overflow_address; } else { o = find_first_object (min_add, heap_segment_mem (seg)); return o; } } } } o = max (heap_segment_mem (seg), min_add); return o; #endif //USE_REGIONS } void gc_heap::background_process_mark_overflow_internal (uint8_t* min_add, uint8_t* max_add, BOOL concurrent_p) { if (concurrent_p) { current_bgc_state = bgc_overflow_soh; } size_t total_marked_objects = 0; #ifdef MULTIPLE_HEAPS int thread = heap_number; #endif //MULTIPLE_HEAPS int start_gen_idx = get_start_generation_index(); #ifdef USE_REGIONS if (concurrent_p) start_gen_idx = max_generation; #endif //USE_REGIONS exclusive_sync* loh_alloc_lock = 0; #ifndef USE_REGIONS dprintf (2,("Processing Mark overflow [%Ix %Ix]", (size_t)min_add, (size_t)max_add)); #endif #ifdef MULTIPLE_HEAPS // We don't have each heap scan all heaps concurrently because we are worried about // multiple threads calling things like find_first_object. int h_start = (concurrent_p ? heap_number : 0); int h_end = (concurrent_p ? (heap_number + 1) : n_heaps); for (int hi = h_start; hi < h_end; hi++) { gc_heap* hp = (concurrent_p ? this : g_heaps [(heap_number + hi) % n_heaps]); #else { gc_heap* hp = 0; #endif //MULTIPLE_HEAPS BOOL small_object_segments = TRUE; loh_alloc_lock = hp->bgc_alloc_lock; for (int i = start_gen_idx; i < total_generation_count; i++) { int align_const = get_alignment_constant (small_object_segments); generation* gen = hp->generation_of (i); heap_segment* seg = heap_segment_in_range (generation_start_segment (gen)); PREFIX_ASSUME(seg != NULL); uint8_t* current_min_add = min_add; uint8_t* current_max_add = max_add; while (seg) { #ifdef USE_REGIONS if (heap_segment_overflow_p (seg)) { seg->flags &= ~heap_segment_flags_overflow; current_min_add = heap_segment_mem (seg); current_max_add = heap_segment_allocated (seg); dprintf (2,("Processing Mark overflow [%Ix %Ix]", (size_t)current_min_add, (size_t)current_max_add)); } else { current_min_add = current_max_add = 0; } #endif //USE_REGIONS uint8_t* o = hp->background_first_overflow (current_min_add, seg, concurrent_p, small_object_segments); while ((o < hp->background_seg_end (seg, concurrent_p)) && (o <= current_max_add)) { dprintf (3, ("considering %Ix", (size_t)o)); size_t s; if (concurrent_p && !small_object_segments) { loh_alloc_lock->bgc_mark_set (o); if (((CObjectHeader*)o)->IsFree()) { s = unused_array_size (o); } else { s = size (o); } } else { s = size (o); } if (background_object_marked (o, FALSE) && contain_pointers_or_collectible (o)) { total_marked_objects++; go_through_object_cl (method_table(o), o, s, poo, uint8_t* oo = *poo; background_mark_object (oo THREAD_NUMBER_ARG); ); } if (concurrent_p && !small_object_segments) { loh_alloc_lock->bgc_mark_done (); } o = o + Align (s, align_const); if (concurrent_p) { allow_fgc(); } } #ifdef USE_REGIONS if (current_max_add != 0) #endif //USE_REGIONS { dprintf (2, ("went through overflow objects in segment %Ix (%d) (so far %Id marked)", heap_segment_mem (seg), (small_object_segments ? 
0 : 1), total_marked_objects)); } #ifndef USE_REGIONS if (concurrent_p && (seg == hp->saved_overflow_ephemeral_seg)) { break; } #endif //!USE_REGIONS seg = heap_segment_next_in_range (seg); } if (concurrent_p) { current_bgc_state = bgc_overflow_uoh; } dprintf (2, ("h%d: SOH: ov-mo: %Id", heap_number, total_marked_objects)); fire_overflow_event (min_add, max_add, total_marked_objects, i); if (small_object_segments) { concurrent_print_time_delta (concurrent_p ? "Cov SOH" : "Nov SOH"); } total_marked_objects = 0; small_object_segments = FALSE; } } } BOOL gc_heap::background_process_mark_overflow (BOOL concurrent_p) { BOOL grow_mark_array_p = TRUE; if (concurrent_p) { assert (!processed_eph_overflow_p); #ifndef USE_REGIONS if ((background_max_overflow_address != 0) && (background_min_overflow_address != MAX_PTR)) { // We have overflow to process but we know we can't process the ephemeral generations // now (we actually could process till the current gen1 start but since we are going to // make overflow per segment, for now I'll just stop at the saved gen1 start. saved_overflow_ephemeral_seg = ephemeral_heap_segment; background_max_soh_overflow_address = heap_segment_reserved (saved_overflow_ephemeral_seg); background_min_soh_overflow_address = generation_allocation_start (generation_of (max_generation - 1)); } #endif //!USE_REGIONS } else { #ifndef USE_REGIONS assert ((saved_overflow_ephemeral_seg == 0) || ((background_max_soh_overflow_address != 0) && (background_min_soh_overflow_address != MAX_PTR))); #endif //!USE_REGIONS if (!processed_eph_overflow_p) { // if there was no more overflow we just need to process what we didn't process // on the saved ephemeral segment. #ifdef USE_REGIONS if (!background_overflow_p) #else if ((background_max_overflow_address == 0) && (background_min_overflow_address == MAX_PTR)) #endif //USE_REGIONS { dprintf (2, ("final processing mark overflow - no more overflow since last time")); grow_mark_array_p = FALSE; } #ifdef USE_REGIONS background_overflow_p = TRUE; #else background_min_overflow_address = min (background_min_overflow_address, background_min_soh_overflow_address); background_max_overflow_address = max (background_max_overflow_address, background_max_soh_overflow_address); #endif //!USE_REGIONS processed_eph_overflow_p = TRUE; } } BOOL overflow_p = FALSE; recheck: #ifdef USE_REGIONS if (background_overflow_p) #else if ((! ((background_max_overflow_address == 0)) || ! ((background_min_overflow_address == MAX_PTR)))) #endif { overflow_p = TRUE; if (grow_mark_array_p) { // Try to grow the array. 
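// Illustrative arithmetic (the figures are assumed): doubling a 1M-entry stack gives
// new_size = 2M; since 2M * sizeof(mark) is well above the 100 KB threshold, the cap
// (get_total_heap_size() / 10) / sizeof(mark) applies - e.g. for a 4 GB heap and a
// hypothetical 64-byte mark record that cap is roughly 6.7 million entries, so the
// doubled size is still accepted.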
size_t new_size = max (MARK_STACK_INITIAL_LENGTH, 2*background_mark_stack_array_length); if ((new_size * sizeof(mark)) > 100*1024) { size_t new_max_size = (get_total_heap_size() / 10) / sizeof(mark); new_size = min(new_max_size, new_size); } grow_bgc_mark_stack (new_size); } else { grow_mark_array_p = TRUE; } #ifdef USE_REGIONS uint8_t* min_add = 0; uint8_t* max_add = 0; background_overflow_p = FALSE; #else uint8_t* min_add = background_min_overflow_address; uint8_t* max_add = background_max_overflow_address; background_max_overflow_address = 0; background_min_overflow_address = MAX_PTR; #endif background_process_mark_overflow_internal (min_add, max_add, concurrent_p); if (!concurrent_p) { goto recheck; } } return overflow_p; } #endif //BACKGROUND_GC inline void gc_heap::mark_through_object (uint8_t* oo, BOOL mark_class_object_p THREAD_NUMBER_DCL) { #ifndef COLLECTIBLE_CLASS UNREFERENCED_PARAMETER(mark_class_object_p); BOOL to_mark_class_object = FALSE; #else //COLLECTIBLE_CLASS BOOL to_mark_class_object = (mark_class_object_p && (is_collectible(oo))); #endif //COLLECTIBLE_CLASS if (contain_pointers (oo) || to_mark_class_object) { dprintf(3,( "Marking through %Ix", (size_t)oo)); size_t s = size (oo); #ifdef COLLECTIBLE_CLASS if (to_mark_class_object) { uint8_t* class_obj = get_class_object (oo); mark_object (class_obj THREAD_NUMBER_ARG); } #endif //COLLECTIBLE_CLASS if (contain_pointers (oo)) { go_through_object_nostart (method_table(oo), oo, s, po, uint8_t* o = *po; mark_object (o THREAD_NUMBER_ARG); ); } } } size_t gc_heap::get_total_heap_size() { size_t total_heap_size = 0; // It's correct to start from max_generation for this method because // generation_sizes will return all SOH sizes when passed max_generation. #ifdef MULTIPLE_HEAPS int hn = 0; for (hn = 0; hn < gc_heap::n_heaps; hn++) { gc_heap* hp2 = gc_heap::g_heaps [hn]; for (int i = max_generation; i < total_generation_count; i++) { total_heap_size += hp2->generation_sizes (hp2->generation_of (i)); } } #else for (int i = max_generation; i < total_generation_count; i++) { total_heap_size += generation_sizes (generation_of (i)); } #endif //MULTIPLE_HEAPS return total_heap_size; } size_t gc_heap::get_total_fragmentation() { size_t total_fragmentation = 0; #ifdef MULTIPLE_HEAPS for (int hn = 0; hn < gc_heap::n_heaps; hn++) { gc_heap* hp = gc_heap::g_heaps[hn]; #else //MULTIPLE_HEAPS { gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS for (int i = 0; i < total_generation_count; i++) { generation* gen = hp->generation_of (i); total_fragmentation += (generation_free_list_space (gen) + generation_free_obj_space (gen)); } } return total_fragmentation; } size_t gc_heap::get_total_gen_fragmentation (int gen_number) { size_t total_fragmentation = 0; #ifdef MULTIPLE_HEAPS for (int hn = 0; hn < gc_heap::n_heaps; hn++) { gc_heap* hp = gc_heap::g_heaps[hn]; #else //MULTIPLE_HEAPS { gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS generation* gen = hp->generation_of (gen_number); total_fragmentation += (generation_free_list_space (gen) + generation_free_obj_space (gen)); } return total_fragmentation; } size_t gc_heap::get_total_gen_estimated_reclaim (int gen_number) { size_t total_estimated_reclaim = 0; #ifdef MULTIPLE_HEAPS for (int hn = 0; hn < gc_heap::n_heaps; hn++) { gc_heap* hp = gc_heap::g_heaps[hn]; #else //MULTIPLE_HEAPS { gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS total_estimated_reclaim += hp->estimated_reclaim (gen_number); } return total_estimated_reclaim; } size_t gc_heap::get_total_gen_size (int gen_number) { #ifdef 
MULTIPLE_HEAPS size_t size = 0; for (int hn = 0; hn < gc_heap::n_heaps; hn++) { gc_heap* hp = gc_heap::g_heaps[hn]; size += hp->generation_size (gen_number); } #else size_t size = generation_size (gen_number); #endif //MULTIPLE_HEAPS return size; } size_t gc_heap::committed_size() { size_t total_committed = 0; const size_t kB = 1024; for (int i = get_start_generation_index(); i < total_generation_count; i++) { generation* gen = generation_of (i); heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); size_t gen_committed = 0; size_t gen_allocated = 0; while (seg) { uint8_t* start = #ifdef USE_REGIONS get_region_start (seg); #else (uint8_t*)seg; #endif //USE_REGIONS gen_committed += heap_segment_committed (seg) - start; gen_allocated += heap_segment_allocated (seg) - start; seg = heap_segment_next (seg); } dprintf (3, ("h%d committed in gen%d %IdkB, allocated %IdkB, committed-allocated %IdkB", heap_number, i, gen_committed/kB, gen_allocated/kB, (gen_committed - gen_allocated)/kB)); total_committed += gen_committed; } #ifdef USE_REGIONS size_t committed_in_free = 0; for (int kind = basic_free_region; kind < count_free_region_kinds; kind++) { committed_in_free += free_regions[kind].get_size_committed_in_free(); } dprintf (3, ("h%d committed in free %IdkB", heap_number, committed_in_free/kB)); total_committed += committed_in_free; #endif //USE_REGIONS return total_committed; } size_t gc_heap::get_total_committed_size() { size_t total_committed = 0; #ifdef MULTIPLE_HEAPS int hn = 0; for (hn = 0; hn < gc_heap::n_heaps; hn++) { gc_heap* hp = gc_heap::g_heaps [hn]; total_committed += hp->committed_size(); } #else total_committed = committed_size(); #endif //MULTIPLE_HEAPS return total_committed; } size_t gc_heap::uoh_committed_size (int gen_number, size_t* allocated) { generation* gen = generation_of (gen_number); heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); size_t total_committed = 0; size_t total_allocated = 0; while (seg) { uint8_t* start = #ifdef USE_REGIONS get_region_start (seg); #else (uint8_t*)seg; #endif //USE_REGIONS total_committed += heap_segment_committed (seg) - start; total_allocated += heap_segment_allocated (seg) - start; seg = heap_segment_next (seg); } *allocated = total_allocated; return total_committed; } void gc_heap::get_memory_info (uint32_t* memory_load, uint64_t* available_physical, uint64_t* available_page_file) { GCToOSInterface::GetMemoryStatus(is_restricted_physical_mem ? total_physical_mem : 0, memory_load, available_physical, available_page_file); } //returns TRUE is an overflow happened. BOOL gc_heap::process_mark_overflow(int condemned_gen_number) { size_t last_promoted_bytes = get_promoted_bytes(); BOOL overflow_p = FALSE; recheck: if ((! (max_overflow_address == 0) || ! (min_overflow_address == MAX_PTR))) { overflow_p = TRUE; // Try to grow the array. 
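// The growth below is best effort: the stack is reallocated only when the new size
// exceeds the current one by more than half, and if the nothrow allocation fails the
// old array is kept and the recheck loop simply reprocesses whatever overflows again.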
size_t new_size = max (MARK_STACK_INITIAL_LENGTH, 2*mark_stack_array_length); if ((new_size * sizeof(mark)) > 100*1024) { size_t new_max_size = (get_total_heap_size() / 10) / sizeof(mark); new_size = min(new_max_size, new_size); } if ((mark_stack_array_length < new_size) && ((new_size - mark_stack_array_length) > (mark_stack_array_length / 2))) { mark* tmp = new (nothrow) mark [new_size]; if (tmp) { delete mark_stack_array; mark_stack_array = tmp; mark_stack_array_length = new_size; } } uint8_t* min_add = min_overflow_address; uint8_t* max_add = max_overflow_address; max_overflow_address = 0; min_overflow_address = MAX_PTR; process_mark_overflow_internal (condemned_gen_number, min_add, max_add); goto recheck; } size_t current_promoted_bytes = get_promoted_bytes(); if (current_promoted_bytes != last_promoted_bytes) fire_mark_event (ETW::GC_ROOT_OVERFLOW, current_promoted_bytes, last_promoted_bytes); return overflow_p; } void gc_heap::process_mark_overflow_internal (int condemned_gen_number, uint8_t* min_add, uint8_t* max_add) { #ifdef MULTIPLE_HEAPS int thread = heap_number; #endif //MULTIPLE_HEAPS BOOL full_p = (condemned_gen_number == max_generation); dprintf(3,("Processing Mark overflow [%Ix %Ix]", (size_t)min_add, (size_t)max_add)); size_t obj_count = 0; #ifdef MULTIPLE_HEAPS for (int hi = 0; hi < n_heaps; hi++) { gc_heap* hp = g_heaps [(heap_number + hi) % n_heaps]; #else { gc_heap* hp = 0; #endif //MULTIPLE_HEAPS int gen_limit = full_p ? total_generation_count : condemned_gen_number + 1; for (int i = get_stop_generation_index (condemned_gen_number); i < gen_limit; i++) { generation* gen = hp->generation_of (i); heap_segment* seg = heap_segment_in_range (generation_start_segment (gen)); int align_const = get_alignment_constant (i < uoh_start_generation); PREFIX_ASSUME(seg != NULL); while (seg) { uint8_t* o = max (heap_segment_mem (seg), min_add); uint8_t* end = heap_segment_allocated (seg); while ((o < end) && (o <= max_add)) { assert ((min_add <= o) && (max_add >= o)); dprintf (3, ("considering %Ix", (size_t)o)); if (marked (o)) { mark_through_object (o, TRUE THREAD_NUMBER_ARG); obj_count++; } o = o + Align (size (o), align_const); } seg = heap_segment_next_in_range (seg); } } #ifndef MULTIPLE_HEAPS // we should have found at least one object assert (obj_count > 0); #endif //MULTIPLE_HEAPS } } // Scanning for promotion for dependent handles need special handling. Because the primary holds a strong // reference to the secondary (when the primary itself is reachable) and this can cause a cascading series of // promotions (the secondary of one handle is or promotes the primary of another) we might need to perform the // promotion scan multiple times. // This helper encapsulates the logic to complete all dependent handle promotions when running a server GC. It // also has the effect of processing any mark stack overflow. #ifdef MULTIPLE_HEAPS // When multiple heaps are enabled we have must utilize a more complex algorithm in order to keep all the GC // worker threads synchronized. The algorithms are sufficiently divergent that we have different // implementations based on whether MULTIPLE_HEAPS is defined or not. // // Define some static variables used for synchronization in the method below. These should really be defined // locally but MSVC complains when the VOLATILE macro is expanded into an instantiation of the Volatile class. // // A note about the synchronization used within this method. 
Communication between the worker threads is // achieved via two shared booleans (defined below). These both act as latches that are transitioned only from // false -> true by unsynchronized code. They are only read or reset to false by a single thread under the // protection of a join. static VOLATILE(BOOL) s_fUnpromotedHandles = FALSE; static VOLATILE(BOOL) s_fUnscannedPromotions = FALSE; static VOLATILE(BOOL) s_fScanRequired; void gc_heap::scan_dependent_handles (int condemned_gen_number, ScanContext *sc, BOOL initial_scan_p) { // Whenever we call this method there may have been preceding object promotions. So set // s_fUnscannedPromotions unconditionally (during further iterations of the scanning loop this will be set // based on the how the scanning proceeded). s_fUnscannedPromotions = TRUE; // We don't know how many times we need to loop yet. In particular we can't base the loop condition on // the state of this thread's portion of the dependent handle table. That's because promotions on other // threads could cause handle promotions to become necessary here. Even if there are definitely no more // promotions possible in this thread's handles, we still have to stay in lock-step with those worker // threads that haven't finished yet (each GC worker thread has to join exactly the same number of times // as all the others or they'll get out of step). while (true) { // The various worker threads are all currently racing in this code. We need to work out if at least // one of them think they have work to do this cycle. Each thread needs to rescan its portion of the // dependent handle table when both of the following conditions apply: // 1) At least one (arbitrary) object might have been promoted since the last scan (because if this // object happens to correspond to a primary in one of our handles we might potentially have to // promote the associated secondary). // 2) The table for this thread has at least one handle with a secondary that isn't promoted yet. // // The first condition is represented by s_fUnscannedPromotions. This is always non-zero for the first // iteration of this loop (see comment above) and in subsequent cycles each thread updates this // whenever a mark stack overflow occurs or scanning their dependent handles results in a secondary // being promoted. This value is cleared back to zero in a synchronized fashion in the join that // follows below. Note that we can't read this outside of the join since on any iteration apart from // the first threads will be racing between reading this value and completing their previous // iteration's table scan. // // The second condition is tracked by the dependent handle code itself on a per worker thread basis // (and updated by the GcDhReScan() method). We call GcDhUnpromotedHandlesExist() on each thread to // determine the local value and collect the results into the s_fUnpromotedHandles variable in what is // effectively an OR operation. As per s_fUnscannedPromotions we can't read the final result until // we're safely joined. if (GCScan::GcDhUnpromotedHandlesExist(sc)) s_fUnpromotedHandles = TRUE; // Synchronize all the threads so we can read our state variables safely. The shared variable // s_fScanRequired, indicating whether we should scan the tables or terminate the loop, will be set by // a single thread inside the join. gc_t_join.join(this, gc_join_scan_dependent_handles); if (gc_t_join.joined()) { // We're synchronized so it's safe to read our shared state variables. 
We update another shared // variable to indicate to all threads whether we'll be scanning for another cycle or terminating // the loop. We scan if there has been at least one object promotion since last time and at least // one thread has a dependent handle table with a potential handle promotion possible. s_fScanRequired = s_fUnscannedPromotions && s_fUnpromotedHandles; // Reset our shared state variables (ready to be set again on this scan or with a good initial // value for the next call if we're terminating the loop). s_fUnscannedPromotions = FALSE; s_fUnpromotedHandles = FALSE; if (!s_fScanRequired) { // We're terminating the loop. Perform any last operations that require single threaded access. if (!initial_scan_p) { // On the second invocation we reconcile all mark overflow ranges across the heaps. This can help // load balance if some of the heaps have an abnormally large workload. uint8_t* all_heaps_max = 0; uint8_t* all_heaps_min = MAX_PTR; int i; for (i = 0; i < n_heaps; i++) { if (all_heaps_max < g_heaps[i]->max_overflow_address) all_heaps_max = g_heaps[i]->max_overflow_address; if (all_heaps_min > g_heaps[i]->min_overflow_address) all_heaps_min = g_heaps[i]->min_overflow_address; } for (i = 0; i < n_heaps; i++) { g_heaps[i]->max_overflow_address = all_heaps_max; g_heaps[i]->min_overflow_address = all_heaps_min; } } } dprintf(3, ("Starting all gc thread mark stack overflow processing")); gc_t_join.restart(); } // Handle any mark stack overflow: scanning dependent handles relies on all previous object promotions // being visible. If there really was an overflow (process_mark_overflow returns true) then set the // global flag indicating that at least one object promotion may have occurred (the usual comment // about races applies). (Note it's OK to set this flag even if we're about to terminate the loop and // exit the method since we unconditionally set this variable on method entry anyway). if (process_mark_overflow(condemned_gen_number)) s_fUnscannedPromotions = TRUE; // If we decided that no scan was required we can terminate the loop now. if (!s_fScanRequired) break; // Otherwise we must join with the other workers to ensure that all mark stack overflows have been // processed before we start scanning dependent handle tables (if overflows remain while we scan we // could miss noting the promotion of some primary objects). gc_t_join.join(this, gc_join_rescan_dependent_handles); if (gc_t_join.joined()) { dprintf(3, ("Starting all gc thread for dependent handle promotion")); gc_t_join.restart(); } // If the portion of the dependent handle table managed by this worker has handles that could still be // promoted perform a rescan. If the rescan resulted in at least one promotion note this fact since it // could require a rescan of handles on this or other workers. if (GCScan::GcDhUnpromotedHandlesExist(sc)) if (GCScan::GcDhReScan(sc)) s_fUnscannedPromotions = TRUE; } } #else //MULTIPLE_HEAPS // Non-multiple heap version of scan_dependent_handles: much simpler without the need to keep multiple worker // threads synchronized. void gc_heap::scan_dependent_handles (int condemned_gen_number, ScanContext *sc, BOOL initial_scan_p) { UNREFERENCED_PARAMETER(initial_scan_p); // Whenever we call this method there may have been preceding object promotions. So set // fUnscannedPromotions unconditionally (during further iterations of the scanning loop this will be set // based on the how the scanning proceeded). 
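    // In outline, the single-heap loop below is:
    //
    //     while (some handle still has an unpromoted secondary && something was promoted last pass)
    //     {
    //         drain any mark stack overflow;      // may promote more objects
    //         rescan the dependent handle table;  // may promote more secondaries
    //     }
    //     drain any remaining mark stack overflow;
    //
    // i.e. it iterates to a fixed point, with fUnscannedPromotions recording whether
    // the previous pass produced any new promotions.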
bool fUnscannedPromotions = true; // Loop until there are either no more dependent handles that can have their secondary promoted or we've // managed to perform a scan without promoting anything new. while (GCScan::GcDhUnpromotedHandlesExist(sc) && fUnscannedPromotions) { // On each iteration of the loop start with the assumption that no further objects have been promoted. fUnscannedPromotions = false; // Handle any mark stack overflow: scanning dependent handles relies on all previous object promotions // being visible. If there was an overflow (process_mark_overflow returned true) then additional // objects now appear to be promoted and we should set the flag. if (process_mark_overflow(condemned_gen_number)) fUnscannedPromotions = true; // Perform the scan and set the flag if any promotions resulted. if (GCScan::GcDhReScan(sc)) fUnscannedPromotions = true; } // Process any mark stack overflow that may have resulted from scanning handles (or if we didn't need to // scan any handles at all this is the processing of overflows that may have occurred prior to this method // invocation). process_mark_overflow(condemned_gen_number); } #endif //MULTIPLE_HEAPS size_t gc_heap::get_generation_start_size (int gen_number) { #ifdef USE_REGIONS return 0; #else return Align (size (generation_allocation_start (generation_of (gen_number))), get_alignment_constant (gen_number <= max_generation)); #endif //!USE_REGIONS } inline int gc_heap::get_num_heaps() { #ifdef MULTIPLE_HEAPS return n_heaps; #else return 1; #endif //MULTIPLE_HEAPS } BOOL gc_heap::decide_on_promotion_surv (size_t threshold) { #ifdef MULTIPLE_HEAPS for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; #else //MULTIPLE_HEAPS { gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS dynamic_data* dd = hp->dynamic_data_of (min ((settings.condemned_generation + 1), max_generation)); size_t older_gen_size = dd_current_size (dd) + (dd_desired_allocation (dd) - dd_new_allocation (dd)); size_t promoted = hp->total_promoted_bytes; dprintf (2, ("promotion threshold: %Id, promoted bytes: %Id size n+1: %Id", threshold, promoted, older_gen_size)); if ((threshold > (older_gen_size)) || (promoted > threshold)) { return TRUE; } } return FALSE; } inline void gc_heap::fire_mark_event (int root_type, size_t& current_promoted_bytes, size_t& last_promoted_bytes) { #ifdef FEATURE_EVENT_TRACE if (informational_event_enabled_p) { current_promoted_bytes = get_promoted_bytes(); size_t root_promoted = current_promoted_bytes - last_promoted_bytes; dprintf (3, ("h%d marked root %s: %Id (%Id - %Id)", heap_number, str_root_kinds[root_type], root_promoted, current_promoted_bytes, last_promoted_bytes)); FIRE_EVENT(GCMarkWithType, heap_number, root_type, root_promoted); last_promoted_bytes = current_promoted_bytes; } #endif // FEATURE_EVENT_TRACE } #ifdef FEATURE_EVENT_TRACE inline void gc_heap::record_mark_time (uint64_t& mark_time, uint64_t& current_mark_time, uint64_t& last_mark_time) { if (informational_event_enabled_p) { current_mark_time = GetHighPrecisionTimeStamp(); mark_time = limit_time_to_uint32 (current_mark_time - last_mark_time); dprintf (3, ("%I64d - %I64d = %I64d", current_mark_time, last_mark_time, (current_mark_time - last_mark_time))); last_mark_time = current_mark_time; } } #endif // FEATURE_EVENT_TRACE void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p) { assert (settings.concurrent == FALSE); ScanContext sc; sc.thread_number = heap_number; sc.promotion = TRUE; sc.concurrent = FALSE; dprintf (2, 
(ThreadStressLog::gcStartMarkMsg(), heap_number, condemned_gen_number)); BOOL full_p = (condemned_gen_number == max_generation); int gen_to_init = condemned_gen_number; if (condemned_gen_number == max_generation) { gen_to_init = total_generation_count - 1; } for (int gen_idx = 0; gen_idx <= gen_to_init; gen_idx++) { dynamic_data* dd = dynamic_data_of (gen_idx); dd_begin_data_size (dd) = generation_size (gen_idx) - dd_fragmentation (dd) - #ifdef USE_REGIONS 0; #else get_generation_start_size (gen_idx); #endif //USE_REGIONS dprintf (2, ("begin data size for gen%d is %Id", gen_idx, dd_begin_data_size (dd))); dd_survived_size (dd) = 0; dd_pinned_survived_size (dd) = 0; dd_artificial_pinned_survived_size (dd) = 0; dd_added_pinned_size (dd) = 0; #ifdef SHORT_PLUGS dd_padding_size (dd) = 0; #endif //SHORT_PLUGS #if defined (RESPECT_LARGE_ALIGNMENT) || defined (FEATURE_STRUCTALIGN) dd_num_npinned_plugs (dd) = 0; #endif //RESPECT_LARGE_ALIGNMENT || FEATURE_STRUCTALIGN } if (gen0_must_clear_bricks > 0) gen0_must_clear_bricks--; size_t last_promoted_bytes = 0; size_t current_promoted_bytes = 0; #if !defined(USE_REGIONS) || defined(_DEBUG) init_promoted_bytes(); #endif //!USE_REGIONS || _DEBUG reset_mark_stack(); #ifdef SNOOP_STATS memset (&snoop_stat, 0, sizeof(snoop_stat)); snoop_stat.heap_index = heap_number; #endif //SNOOP_STATS #ifdef MH_SC_MARK if (full_p) { //initialize the mark stack for (int i = 0; i < max_snoop_level; i++) { ((uint8_t**)(mark_stack_array))[i] = 0; } mark_stack_busy() = 1; } #endif //MH_SC_MARK static uint32_t num_sizedrefs = 0; #ifdef MH_SC_MARK static BOOL do_mark_steal_p = FALSE; #endif //MH_SC_MARK #ifdef FEATURE_CARD_MARKING_STEALING reset_card_marking_enumerators(); #endif // FEATURE_CARD_MARKING_STEALING #ifdef STRESS_REGIONS heap_segment* gen0_region = generation_start_segment (generation_of (0)); while (gen0_region) { size_t gen0_region_size = heap_segment_allocated (gen0_region) - heap_segment_mem (gen0_region); if (gen0_region_size > 0) { if ((num_gen0_regions % pinning_seg_interval) == 0) { dprintf (REGIONS_LOG, ("h%d potentially creating pinning in region %Ix", heap_number, heap_segment_mem (gen0_region))); int align_const = get_alignment_constant (TRUE); // Pinning the first and the middle object in the region. 
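                // In effect: walk the region object by object and pin the first non-free
                // object at or after the current boundary; after the first pin, move the
                // boundary to roughly the middle of the region so the second pin lands
                // near the midpoint, then stop after at most two pins.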
uint8_t* boundary = heap_segment_mem (gen0_region); uint8_t* obj_to_pin = boundary; int num_pinned_objs = 0; while (obj_to_pin < heap_segment_allocated (gen0_region)) { if (obj_to_pin >= boundary && !((CObjectHeader*)obj_to_pin)->IsFree()) { pin_by_gc (obj_to_pin); num_pinned_objs++; if (num_pinned_objs >= 2) break; boundary += (gen0_region_size / 2) + 1; } obj_to_pin += Align (size (obj_to_pin), align_const); } } } num_gen0_regions++; gen0_region = heap_segment_next (gen0_region); } #endif //STRESS_REGIONS #ifdef FEATURE_EVENT_TRACE static uint64_t current_mark_time = 0; static uint64_t last_mark_time = 0; #endif //FEATURE_EVENT_TRACE #ifdef MULTIPLE_HEAPS gc_t_join.join(this, gc_join_begin_mark_phase); if (gc_t_join.joined()) #endif //MULTIPLE_HEAPS { maxgen_size_inc_p = false; #ifdef USE_REGIONS special_sweep_p = false; region_count = global_region_allocator.get_used_region_count(); grow_mark_list_piece(); #endif //USE_REGIONS GCToEEInterface::BeforeGcScanRoots(condemned_gen_number, /* is_bgc */ false, /* is_concurrent */ false); num_sizedrefs = GCToEEInterface::GetTotalNumSizedRefHandles(); #ifdef FEATURE_EVENT_TRACE informational_event_enabled_p = EVENT_ENABLED (GCMarkWithType); if (informational_event_enabled_p) { last_mark_time = GetHighPrecisionTimeStamp(); // We may not have SizedRefs to mark so init it to 0. gc_time_info[time_mark_sizedref] = 0; } #endif //FEATURE_EVENT_TRACE #ifdef MULTIPLE_HEAPS #ifdef MH_SC_MARK if (full_p) { size_t total_heap_size = get_total_heap_size(); if (total_heap_size > (100 * 1024 * 1024)) { do_mark_steal_p = TRUE; } else { do_mark_steal_p = FALSE; } } else { do_mark_steal_p = FALSE; } #endif //MH_SC_MARK gc_t_join.restart(); #endif //MULTIPLE_HEAPS } { //set up the mark lists from g_mark_list assert (g_mark_list); #ifdef MULTIPLE_HEAPS mark_list = &g_mark_list [heap_number*mark_list_size]; #else mark_list = g_mark_list; #endif //MULTIPLE_HEAPS //dont use the mark list for full gc //because multiple segments are more complex to handle and the list //is likely to overflow if (condemned_gen_number < max_generation) mark_list_end = &mark_list [mark_list_size-1]; else mark_list_end = &mark_list [0]; mark_list_index = &mark_list [0]; #ifdef USE_REGIONS if (g_mark_list_piece != nullptr) { #ifdef MULTIPLE_HEAPS // two arrays with alloc_count entries per heap mark_list_piece_start = &g_mark_list_piece[heap_number * 2 * g_mark_list_piece_size]; mark_list_piece_end = &mark_list_piece_start[g_mark_list_piece_size]; #endif //MULTIPLE_HEAPS survived_per_region = (size_t*)&g_mark_list_piece[heap_number * 2 * g_mark_list_piece_size]; old_card_survived_per_region = (size_t*)&survived_per_region[g_mark_list_piece_size]; size_t region_info_to_clear = region_count * sizeof (size_t); memset (survived_per_region, 0, region_info_to_clear); memset (old_card_survived_per_region, 0, region_info_to_clear); } else { #ifdef MULTIPLE_HEAPS // disable use of mark list altogether mark_list_piece_start = nullptr; mark_list_piece_end = nullptr; mark_list_end = &mark_list[0]; #endif //MULTIPLE_HEAPS survived_per_region = nullptr; old_card_survived_per_region = nullptr; } #endif // USE_REGIONS && MULTIPLE_HEAPS #ifndef MULTIPLE_HEAPS shigh = (uint8_t*) 0; slow = MAX_PTR; #endif //MULTIPLE_HEAPS if ((condemned_gen_number == max_generation) && (num_sizedrefs > 0)) { GCScan::GcScanSizedRefs(GCHeap::Promote, condemned_gen_number, max_generation, &sc); fire_mark_event (ETW::GC_ROOT_SIZEDREF, current_promoted_bytes, last_promoted_bytes); #ifdef MULTIPLE_HEAPS gc_t_join.join(this, 
gc_join_scan_sizedref_done); if (gc_t_join.joined()) #endif //MULTIPLE_HEAPS { #ifdef FEATURE_EVENT_TRACE record_mark_time (gc_time_info[time_mark_sizedref], current_mark_time, last_mark_time); #endif //FEATURE_EVENT_TRACE #ifdef MULTIPLE_HEAPS dprintf(3, ("Done with marking all sized refs. Starting all gc thread for marking other strong roots")); gc_t_join.restart(); #endif //MULTIPLE_HEAPS } } dprintf(3,("Marking Roots")); GCScan::GcScanRoots(GCHeap::Promote, condemned_gen_number, max_generation, &sc); fire_mark_event (ETW::GC_ROOT_STACK, current_promoted_bytes, last_promoted_bytes); #ifdef BACKGROUND_GC if (gc_heap::background_running_p()) { scan_background_roots (GCHeap::Promote, heap_number, &sc); fire_mark_event (ETW::GC_ROOT_BGC, current_promoted_bytes, last_promoted_bytes); } #endif //BACKGROUND_GC #ifdef FEATURE_PREMORTEM_FINALIZATION dprintf(3, ("Marking finalization data")); finalize_queue->GcScanRoots(GCHeap::Promote, heap_number, 0); fire_mark_event (ETW::GC_ROOT_FQ, current_promoted_bytes, last_promoted_bytes); #endif // FEATURE_PREMORTEM_FINALIZATION dprintf(3,("Marking handle table")); GCScan::GcScanHandles(GCHeap::Promote, condemned_gen_number, max_generation, &sc); fire_mark_event (ETW::GC_ROOT_HANDLES, current_promoted_bytes, last_promoted_bytes); if (!full_p) { #ifdef USE_REGIONS save_current_survived(); #endif //USE_REGIONS #ifdef FEATURE_CARD_MARKING_STEALING n_eph_soh = 0; n_gen_soh = 0; n_eph_loh = 0; n_gen_loh = 0; #endif //FEATURE_CARD_MARKING_STEALING #ifdef CARD_BUNDLE #ifdef MULTIPLE_HEAPS if (gc_t_join.r_join(this, gc_r_join_update_card_bundle)) { #endif //MULTIPLE_HEAPS #ifndef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES // If we are manually managing card bundles, every write to the card table should already be // accounted for in the card bundle table so there's nothing to update here. 
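            // Card bundles are a coarse summary of the card table: each bundle bit covers
            // a range of card words so that card scanning can skip whole ranges whose
            // cards are known to be clear. Roughly speaking, update_card_table_bundle()
            // refreshes that summary so it covers the card table words written since the
            // bundles were last brought up to date.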
update_card_table_bundle(); #endif if (card_bundles_enabled()) { verify_card_bundles(); } #ifdef MULTIPLE_HEAPS gc_t_join.r_restart(); } #endif //MULTIPLE_HEAPS #endif //CARD_BUNDLE card_fn mark_object_fn = &gc_heap::mark_object_simple; #ifdef HEAP_ANALYZE heap_analyze_success = TRUE; if (heap_analyze_enabled) { internal_root_array_index = 0; current_obj = 0; current_obj_size = 0; mark_object_fn = &gc_heap::ha_mark_object_simple; } #endif //HEAP_ANALYZE #if defined(MULTIPLE_HEAPS) && defined(FEATURE_CARD_MARKING_STEALING) if (!card_mark_done_soh) #endif // MULTIPLE_HEAPS && FEATURE_CARD_MARKING_STEALING { dprintf (3, ("Marking cross generation pointers on heap %d", heap_number)); mark_through_cards_for_segments(mark_object_fn, FALSE THIS_ARG); #if defined(MULTIPLE_HEAPS) && defined(FEATURE_CARD_MARKING_STEALING) card_mark_done_soh = true; #endif // MULTIPLE_HEAPS && FEATURE_CARD_MARKING_STEALING } #if defined(MULTIPLE_HEAPS) && defined(FEATURE_CARD_MARKING_STEALING) if (!card_mark_done_uoh) #endif // MULTIPLE_HEAPS && FEATURE_CARD_MARKING_STEALING { dprintf (3, ("Marking cross generation pointers for uoh objects on heap %d", heap_number)); for (int i = uoh_start_generation; i < total_generation_count; i++) { #ifndef ALLOW_REFERENCES_IN_POH if (i != poh_generation) #endif //ALLOW_REFERENCES_IN_POH mark_through_cards_for_uoh_objects(mark_object_fn, i, FALSE THIS_ARG); } #if defined(MULTIPLE_HEAPS) && defined(FEATURE_CARD_MARKING_STEALING) card_mark_done_uoh = true; #endif // MULTIPLE_HEAPS && FEATURE_CARD_MARKING_STEALING } #if defined(MULTIPLE_HEAPS) && defined(FEATURE_CARD_MARKING_STEALING) // check the other heaps cyclically and try to help out where the marking isn't done for (int i = 0; i < gc_heap::n_heaps; i++) { int heap_number_to_look_at = (i + heap_number) % gc_heap::n_heaps; gc_heap* hp = gc_heap::g_heaps[heap_number_to_look_at]; if (!hp->card_mark_done_soh) { dprintf(3, ("Marking cross generation pointers on heap %d", hp->heap_number)); hp->mark_through_cards_for_segments(mark_object_fn, FALSE THIS_ARG); hp->card_mark_done_soh = true; } if (!hp->card_mark_done_uoh) { dprintf(3, ("Marking cross generation pointers for large objects on heap %d", hp->heap_number)); for (int i = uoh_start_generation; i < total_generation_count; i++) { #ifndef ALLOW_REFERENCES_IN_POH if (i != poh_generation) #endif //ALLOW_REFERENCES_IN_POH hp->mark_through_cards_for_uoh_objects(mark_object_fn, i, FALSE THIS_ARG); } hp->card_mark_done_uoh = true; } } #endif // MULTIPLE_HEAPS && FEATURE_CARD_MARKING_STEALING #ifdef USE_REGIONS update_old_card_survived(); #endif //USE_REGIONS fire_mark_event (ETW::GC_ROOT_OLDER, current_promoted_bytes, last_promoted_bytes); } } #ifdef MH_SC_MARK if (do_mark_steal_p) { mark_steal(); fire_mark_event (ETW::GC_ROOT_STEAL, current_promoted_bytes, last_promoted_bytes); } #endif //MH_SC_MARK // Dependent handles need to be scanned with a special algorithm (see the header comment on // scan_dependent_handles for more detail). We perform an initial scan without synchronizing with other // worker threads or processing any mark stack overflow. This is not guaranteed to complete the operation // but in a common case (where there are no dependent handles that are due to be collected) it allows us // to optimize away further scans. The call to scan_dependent_handles is what will cycle through more // iterations if required and will also perform processing of any mark stack overflow once the dependent // handle table has been fully promoted. 
GCScan::GcDhInitialScan(GCHeap::Promote, condemned_gen_number, max_generation, &sc); scan_dependent_handles(condemned_gen_number, &sc, true); fire_mark_event (ETW::GC_ROOT_DH_HANDLES, current_promoted_bytes, last_promoted_bytes); #ifdef MULTIPLE_HEAPS dprintf(3, ("Joining for short weak handle scan")); gc_t_join.join(this, gc_join_null_dead_short_weak); if (gc_t_join.joined()) #endif //MULTIPLE_HEAPS { #ifdef FEATURE_EVENT_TRACE record_mark_time (gc_time_info[time_mark_roots], current_mark_time, last_mark_time); #endif //FEATURE_EVENT_TRACE uint64_t promoted_bytes_global = 0; #ifdef HEAP_ANALYZE heap_analyze_enabled = FALSE; #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { promoted_bytes_global += g_heaps[i]->get_promoted_bytes(); } #else promoted_bytes_global = get_promoted_bytes(); #endif //MULTIPLE_HEAPS GCToEEInterface::AnalyzeSurvivorsFinished (settings.gc_index, condemned_gen_number, promoted_bytes_global, GCHeap::ReportGenerationBounds); #endif // HEAP_ANALYZE GCToEEInterface::AfterGcScanRoots (condemned_gen_number, max_generation, &sc); #ifdef MULTIPLE_HEAPS if (!full_p) { // we used r_join and need to reinitialize states for it here. gc_t_join.r_init(); } dprintf(3, ("Starting all gc thread for short weak handle scan")); gc_t_join.restart(); #endif //MULTIPLE_HEAPS } #ifdef FEATURE_CARD_MARKING_STEALING reset_card_marking_enumerators(); if (!full_p) { int generation_skip_ratio_soh = ((n_eph_soh > MIN_SOH_CROSS_GEN_REFS) ? (int)(((float)n_gen_soh / (float)n_eph_soh) * 100) : 100); int generation_skip_ratio_loh = ((n_eph_loh > MIN_LOH_CROSS_GEN_REFS) ? (int)(((float)n_gen_loh / (float)n_eph_loh) * 100) : 100); generation_skip_ratio = min (generation_skip_ratio_soh, generation_skip_ratio_loh); dprintf (2, ("h%d skip ratio soh: %d, loh: %d", heap_number, generation_skip_ratio_soh, generation_skip_ratio_loh)); } #endif // FEATURE_CARD_MARKING_STEALING // null out the target of short weakref that were not promoted. GCScan::GcShortWeakPtrScan (condemned_gen_number, max_generation,&sc); #ifdef MULTIPLE_HEAPS dprintf(3, ("Joining for finalization")); gc_t_join.join(this, gc_join_scan_finalization); if (gc_t_join.joined()) { #endif //MULTIPLE_HEAPS #ifdef FEATURE_EVENT_TRACE record_mark_time (gc_time_info[time_mark_short_weak], current_mark_time, last_mark_time); #endif //FEATURE_EVENT_TRACE #ifdef MULTIPLE_HEAPS dprintf(3, ("Starting all gc thread for Finalization")); gc_t_join.restart(); } #endif //MULTIPLE_HEAPS //Handle finalization. size_t promoted_bytes_live = get_promoted_bytes(); #ifdef FEATURE_PREMORTEM_FINALIZATION dprintf (3, ("Finalize marking")); finalize_queue->ScanForFinalization (GCHeap::Promote, condemned_gen_number, mark_only_p, __this); fire_mark_event (ETW::GC_ROOT_NEW_FQ, current_promoted_bytes, last_promoted_bytes); GCToEEInterface::DiagWalkFReachableObjects(__this); // Scan dependent handles again to promote any secondaries associated with primaries that were promoted // for finalization. As before scan_dependent_handles will also process any mark stack overflow. 
scan_dependent_handles(condemned_gen_number, &sc, false); fire_mark_event (ETW::GC_ROOT_DH_HANDLES, current_promoted_bytes, last_promoted_bytes); #endif //FEATURE_PREMORTEM_FINALIZATION total_promoted_bytes = get_promoted_bytes(); #ifdef MULTIPLE_HEAPS static VOLATILE(int32_t) syncblock_scan_p; dprintf(3, ("Joining for weak pointer deletion")); gc_t_join.join(this, gc_join_null_dead_long_weak); if (gc_t_join.joined()) { dprintf(3, ("Starting all gc thread for weak pointer deletion")); #endif //MULTIPLE_HEAPS #ifdef FEATURE_EVENT_TRACE record_mark_time (gc_time_info[time_mark_scan_finalization], current_mark_time, last_mark_time); #endif //FEATURE_EVENT_TRACE #ifdef USE_REGIONS sync_promoted_bytes(); equalize_promoted_bytes(); #endif //USE_REGIONS #ifdef MULTIPLE_HEAPS syncblock_scan_p = 0; gc_t_join.restart(); } #endif //MULTIPLE_HEAPS // null out the target of long weakref that were not promoted. GCScan::GcWeakPtrScan (condemned_gen_number, max_generation, &sc); #ifdef MULTIPLE_HEAPS size_t total_mark_list_size = sort_mark_list(); // first thread to finish sorting will scan the sync syncblk cache if ((syncblock_scan_p == 0) && (Interlocked::Increment(&syncblock_scan_p) == 1)) #endif //MULTIPLE_HEAPS { // scan for deleted entries in the syncblk cache GCScan::GcWeakPtrScanBySingleThread(condemned_gen_number, max_generation, &sc); } #ifdef MULTIPLE_HEAPS dprintf (3, ("Joining for sync block cache entry scanning")); gc_t_join.join(this, gc_join_null_dead_syncblk); if (gc_t_join.joined()) #endif //MULTIPLE_HEAPS { #ifdef FEATURE_EVENT_TRACE record_mark_time (gc_time_info[time_plan - 1], current_mark_time, last_mark_time); gc_time_info[time_plan] = last_mark_time; #endif //FEATURE_EVENT_TRACE //decide on promotion if (!settings.promotion) { size_t m = 0; for (int n = 0; n <= condemned_gen_number;n++) { #ifdef MULTIPLE_HEAPS m += (size_t)(dd_min_size (dynamic_data_of (n))*(n+1)*0.1); #else m += (size_t)(dd_min_size (dynamic_data_of (n))*(n+1)*0.06); #endif //MULTIPLE_HEAPS } settings.promotion = decide_on_promotion_surv (m); } #ifdef MULTIPLE_HEAPS #ifdef SNOOP_STATS if (do_mark_steal_p) { size_t objects_checked_count = 0; size_t zero_ref_count = 0; size_t objects_marked_count = 0; size_t check_level_count = 0; size_t busy_count = 0; size_t interlocked_count = 0; size_t partial_mark_parent_count = 0; size_t stolen_or_pm_count = 0; size_t stolen_entry_count = 0; size_t pm_not_ready_count = 0; size_t normal_count = 0; size_t stack_bottom_clear_count = 0; for (int i = 0; i < n_heaps; i++) { gc_heap* hp = g_heaps[i]; hp->print_snoop_stat(); objects_checked_count += hp->snoop_stat.objects_checked_count; zero_ref_count += hp->snoop_stat.zero_ref_count; objects_marked_count += hp->snoop_stat.objects_marked_count; check_level_count += hp->snoop_stat.check_level_count; busy_count += hp->snoop_stat.busy_count; interlocked_count += hp->snoop_stat.interlocked_count; partial_mark_parent_count += hp->snoop_stat.partial_mark_parent_count; stolen_or_pm_count += hp->snoop_stat.stolen_or_pm_count; stolen_entry_count += hp->snoop_stat.stolen_entry_count; pm_not_ready_count += hp->snoop_stat.pm_not_ready_count; normal_count += hp->snoop_stat.normal_count; stack_bottom_clear_count += hp->snoop_stat.stack_bottom_clear_count; } fflush (stdout); printf ("-------total stats-------\n"); printf ("%8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s\n", "checked", "zero", "marked", "level", "busy", "xchg", "pmparent", "s_pm", "stolen", "nready", "normal", "clear"); printf ("%8d | %8d | %8d | %8d | %8d | 
%8d | %8d | %8d | %8d | %8d | %8d | %8d\n", objects_checked_count, zero_ref_count, objects_marked_count, check_level_count, busy_count, interlocked_count, partial_mark_parent_count, stolen_or_pm_count, stolen_entry_count, pm_not_ready_count, normal_count, stack_bottom_clear_count); } #endif //SNOOP_STATS dprintf(3, ("Starting all threads for end of mark phase")); gc_t_join.restart(); #endif //MULTIPLE_HEAPS } #if defined(MULTIPLE_HEAPS) && !defined(USE_REGIONS) merge_mark_lists (total_mark_list_size); #endif //MULTIPLE_HEAPS && !USE_REGIONS finalization_promoted_bytes = total_promoted_bytes - promoted_bytes_live; dprintf(2,("---- End of mark phase ----")); } inline void gc_heap::pin_object (uint8_t* o, uint8_t** ppObject) { dprintf (3, ("Pinning %Ix->%Ix", (size_t)ppObject, (size_t)o)); set_pinned (o); #ifdef FEATURE_EVENT_TRACE if(EVENT_ENABLED(PinObjectAtGCTime)) { fire_etw_pin_object_event(o, ppObject); } #endif // FEATURE_EVENT_TRACE num_pinned_objects++; } size_t gc_heap::get_total_pinned_objects() { #ifdef MULTIPLE_HEAPS size_t total_num_pinned_objects = 0; for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; total_num_pinned_objects += hp->num_pinned_objects; } return total_num_pinned_objects; #else //MULTIPLE_HEAPS return num_pinned_objects; #endif //MULTIPLE_HEAPS } void gc_heap::reinit_pinned_objects() { #ifdef MULTIPLE_HEAPS for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap::g_heaps[i]->num_pinned_objects = 0; } #else //MULTIPLE_HEAPS num_pinned_objects = 0; #endif //MULTIPLE_HEAPS } void gc_heap::reset_mark_stack () { reset_pinned_queue(); max_overflow_address = 0; min_overflow_address = MAX_PTR; } #ifdef FEATURE_STRUCTALIGN // // The word with left child, right child, and align info is laid out as follows: // // | upper short word | lower short word | // |<------------> <----->|<------------> <----->| // | left child info hi| right child info lo| // x86: | 10 bits 6 bits| 10 bits 6 bits| // // where left/right child are signed values and concat(info hi, info lo) is unsigned. // // The "align info" encodes two numbers: the required alignment (a power of two) // and the misalignment (the number of machine words the destination address needs // to be adjusted by to provide alignment - so this number is always smaller than // the required alignment). Thus, the two can be represented as the "logical or" // of the two numbers. Note that the actual pad is computed from the misalignment // by adding the alignment iff the misalignment is non-zero and less than min_obj_size. // // The number of bits in a brick. #if defined (TARGET_AMD64) #define brick_bits (12) #else #define brick_bits (11) #endif //TARGET_AMD64 C_ASSERT(brick_size == (1 << brick_bits)); // The number of bits needed to represent the offset to a child node. // "brick_bits + 1" allows us to represent a signed offset within a brick. #define child_bits (brick_bits + 1 - LOG2_PTRSIZE) // The number of bits in each of the pad hi, pad lo fields. 
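// For example: on x86, brick_bits is 11 and LOG2_PTRSIZE is 2, so
// child_bits = 11 + 1 - 2 = 10 and pad_bits = 16 - 10 = 6, which is the
// "10 bits / 6 bits" split shown in the diagram above. TARGET_AMD64
// (brick_bits = 12, LOG2_PTRSIZE = 3) works out to the same 10/6 split.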
#define pad_bits (sizeof(short) * 8 - child_bits) #define child_from_short(w) (((signed short)(w) / (1 << (pad_bits - LOG2_PTRSIZE))) & ~((1 << LOG2_PTRSIZE) - 1)) #define pad_mask ((1 << pad_bits) - 1) #define pad_from_short(w) ((size_t)(w) & pad_mask) #else // FEATURE_STRUCTALIGN #define child_from_short(w) (w) #endif // FEATURE_STRUCTALIGN inline short node_left_child(uint8_t* node) { return child_from_short(((plug_and_pair*)node)[-1].m_pair.left); } inline void set_node_left_child(uint8_t* node, ptrdiff_t val) { assert (val > -(ptrdiff_t)brick_size); assert (val < (ptrdiff_t)brick_size); assert (Aligned (val)); #ifdef FEATURE_STRUCTALIGN size_t pad = pad_from_short(((plug_and_pair*)node)[-1].m_pair.left); ((plug_and_pair*)node)[-1].m_pair.left = ((short)val << (pad_bits - LOG2_PTRSIZE)) | (short)pad; #else // FEATURE_STRUCTALIGN ((plug_and_pair*)node)[-1].m_pair.left = (short)val; #endif // FEATURE_STRUCTALIGN assert (node_left_child (node) == val); } inline short node_right_child(uint8_t* node) { return child_from_short(((plug_and_pair*)node)[-1].m_pair.right); } inline void set_node_right_child(uint8_t* node, ptrdiff_t val) { assert (val > -(ptrdiff_t)brick_size); assert (val < (ptrdiff_t)brick_size); assert (Aligned (val)); #ifdef FEATURE_STRUCTALIGN size_t pad = pad_from_short(((plug_and_pair*)node)[-1].m_pair.right); ((plug_and_pair*)node)[-1].m_pair.right = ((short)val << (pad_bits - LOG2_PTRSIZE)) | (short)pad; #else // FEATURE_STRUCTALIGN ((plug_and_pair*)node)[-1].m_pair.right = (short)val; #endif // FEATURE_STRUCTALIGN assert (node_right_child (node) == val); } #ifdef FEATURE_STRUCTALIGN void node_aligninfo (uint8_t* node, int& requiredAlignment, ptrdiff_t& pad) { // Extract the single-number aligninfo from the fields. short left = ((plug_and_pair*)node)[-1].m_pair.left; short right = ((plug_and_pair*)node)[-1].m_pair.right; ptrdiff_t pad_shifted = (pad_from_short(left) << pad_bits) | pad_from_short(right); ptrdiff_t aligninfo = pad_shifted * DATA_ALIGNMENT; // Replicate the topmost bit into all lower bits. ptrdiff_t x = aligninfo; x |= x >> 8; x |= x >> 4; x |= x >> 2; x |= x >> 1; // Clear all bits but the highest. requiredAlignment = (int)(x ^ (x >> 1)); pad = aligninfo - requiredAlignment; pad += AdjustmentForMinPadSize(pad, requiredAlignment); } inline ptrdiff_t node_alignpad (uint8_t* node) { int requiredAlignment; ptrdiff_t alignpad; node_aligninfo (node, requiredAlignment, alignpad); return alignpad; } void clear_node_aligninfo (uint8_t* node) { ((plug_and_pair*)node)[-1].m_pair.left &= ~0 << pad_bits; ((plug_and_pair*)node)[-1].m_pair.right &= ~0 << pad_bits; } void set_node_aligninfo (uint8_t* node, int requiredAlignment, ptrdiff_t pad) { // Encode the alignment requirement and alignment offset as a single number // as described above. 
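    // For example (illustrative): a required alignment of 8 with a misalignment of 3
    // encodes as 8 | 3 = 0b1011. Because the misalignment is always smaller than the
    // (power-of-two) alignment, the decoder can recover 8 as the highest set bit and
    // 3 as the remaining low bits - the "logical or" representation described above.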
    ptrdiff_t aligninfo = (size_t)requiredAlignment + (pad & (requiredAlignment-1));
    assert (Aligned (aligninfo));
    ptrdiff_t aligninfo_shifted = aligninfo / DATA_ALIGNMENT;
    assert (aligninfo_shifted < (1 << (pad_bits + pad_bits)));

    ptrdiff_t hi = aligninfo_shifted >> pad_bits;
    assert (pad_from_short(((plug_and_gap*)node)[-1].m_pair.left) == 0);
    ((plug_and_pair*)node)[-1].m_pair.left |= hi;

    ptrdiff_t lo = aligninfo_shifted & pad_mask;
    assert (pad_from_short(((plug_and_gap*)node)[-1].m_pair.right) == 0);
    ((plug_and_pair*)node)[-1].m_pair.right |= lo;

#ifdef _DEBUG
    int requiredAlignment2;
    ptrdiff_t pad2;
    node_aligninfo (node, requiredAlignment2, pad2);
    assert (requiredAlignment == requiredAlignment2);
    assert (pad == pad2);
#endif // _DEBUG
}
#endif // FEATURE_STRUCTALIGN

inline
void loh_set_node_relocation_distance(uint8_t* node, ptrdiff_t val)
{
    ptrdiff_t* place = &(((loh_obj_and_pad*)node)[-1].reloc);
    *place = val;
}

inline
ptrdiff_t loh_node_relocation_distance(uint8_t* node)
{
    return (((loh_obj_and_pad*)node)[-1].reloc);
}

inline
ptrdiff_t node_relocation_distance (uint8_t* node)
{
    return (((plug_and_reloc*)(node))[-1].reloc & ~3);
}

inline
void set_node_relocation_distance(uint8_t* node, ptrdiff_t val)
{
    assert (val == (val & ~3));
    ptrdiff_t* place = &(((plug_and_reloc*)node)[-1].reloc);
    //clear the left bit and the relocation field
    *place &= 1;
    *place |= val;
}

#define node_left_p(node) (((plug_and_reloc*)(node))[-1].reloc & 2)

#define set_node_left(node) ((plug_and_reloc*)(node))[-1].reloc |= 2;

#ifndef FEATURE_STRUCTALIGN
void set_node_realigned(uint8_t* node)
{
    ((plug_and_reloc*)(node))[-1].reloc |= 1;
}

void clear_node_realigned(uint8_t* node)
{
#ifdef RESPECT_LARGE_ALIGNMENT
    ((plug_and_reloc*)(node))[-1].reloc &= ~1;
#else
    UNREFERENCED_PARAMETER(node);
#endif //RESPECT_LARGE_ALIGNMENT
}
#endif // FEATURE_STRUCTALIGN

inline
size_t node_gap_size (uint8_t* node)
{
    return ((plug_and_gap *)node)[-1].gap;
}

void set_gap_size (uint8_t* node, size_t size)
{
    assert (Aligned (size));

    // clear the 2 uint32_t used by the node.
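    // The gap record lives in the plug_and_gap header immediately before the plug
    // (hence the [-1] indexing). Zeroing reloc and lr below wipes any stale
    // relocation distance and tree-link state before the size of the preceding
    // free space is stored in gap.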
((plug_and_gap *)node)[-1].reloc = 0; ((plug_and_gap *)node)[-1].lr =0; ((plug_and_gap *)node)[-1].gap = size; assert ((size == 0 )||(size >= sizeof(plug_and_reloc))); } uint8_t* gc_heap::insert_node (uint8_t* new_node, size_t sequence_number, uint8_t* tree, uint8_t* last_node) { dprintf (3, ("IN: %Ix(%Ix), T: %Ix(%Ix), L: %Ix(%Ix) [%Ix]", (size_t)new_node, brick_of(new_node), (size_t)tree, brick_of(tree), (size_t)last_node, brick_of(last_node), sequence_number)); if (power_of_two_p (sequence_number)) { set_node_left_child (new_node, (tree - new_node)); dprintf (3, ("NT: %Ix, LC->%Ix", (size_t)new_node, (tree - new_node))); tree = new_node; } else { if (oddp (sequence_number)) { set_node_right_child (last_node, (new_node - last_node)); dprintf (3, ("%Ix RC->%Ix", last_node, (new_node - last_node))); } else { uint8_t* earlier_node = tree; size_t imax = logcount(sequence_number) - 2; for (size_t i = 0; i != imax; i++) { earlier_node = earlier_node + node_right_child (earlier_node); } int tmp_offset = node_right_child (earlier_node); assert (tmp_offset); // should never be empty set_node_left_child (new_node, ((earlier_node + tmp_offset ) - new_node)); set_node_right_child (earlier_node, (new_node - earlier_node)); dprintf (3, ("%Ix LC->%Ix, %Ix RC->%Ix", new_node, ((earlier_node + tmp_offset ) - new_node), earlier_node, (new_node - earlier_node))); } } return tree; } size_t gc_heap::update_brick_table (uint8_t* tree, size_t current_brick, uint8_t* x, uint8_t* plug_end) { dprintf (3, ("tree: %Ix, current b: %Ix, x: %Ix, plug_end: %Ix", tree, current_brick, x, plug_end)); if (tree != NULL) { dprintf (3, ("b- %Ix->%Ix pointing to tree %Ix", current_brick, (size_t)(tree - brick_address (current_brick)), tree)); set_brick (current_brick, (tree - brick_address (current_brick))); } else { dprintf (3, ("b- %Ix->-1", current_brick)); set_brick (current_brick, -1); } size_t b = 1 + current_brick; ptrdiff_t offset = 0; size_t last_br = brick_of (plug_end-1); current_brick = brick_of (x-1); dprintf (3, ("ubt: %Ix->%Ix]->%Ix]", b, last_br, current_brick)); while (b <= current_brick) { if (b <= last_br) { set_brick (b, --offset); } else { set_brick (b,-1); } b++; } return brick_of (x); } #ifndef USE_REGIONS void gc_heap::plan_generation_start (generation* gen, generation* consing_gen, uint8_t* next_plug_to_allocate) { #ifdef HOST_64BIT // We should never demote big plugs to gen0. 
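    // In outline: scan the pinned plug queue for this ephemeral segment, and if a
    // pinned plug longer than demotion_plug_len_th is found, dequeue every pin up
    // to and including it, consuming those plugs into the consing generation here
    // so that such large plugs are not left to be demoted into gen0.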
if (gen == youngest_generation) { heap_segment* seg = ephemeral_heap_segment; size_t mark_stack_large_bos = mark_stack_bos; size_t large_plug_pos = 0; while (mark_stack_large_bos < mark_stack_tos) { if (mark_stack_array[mark_stack_large_bos].len > demotion_plug_len_th) { while (mark_stack_bos <= mark_stack_large_bos) { size_t entry = deque_pinned_plug(); size_t len = pinned_len (pinned_plug_of (entry)); uint8_t* plug = pinned_plug (pinned_plug_of(entry)); if (len > demotion_plug_len_th) { dprintf (2, ("ps(%d): S %Ix (%Id)(%Ix)", gen->gen_num, plug, len, (plug+len))); } pinned_len (pinned_plug_of (entry)) = plug - generation_allocation_pointer (consing_gen); assert(mark_stack_array[entry].len == 0 || mark_stack_array[entry].len >= Align(min_obj_size)); generation_allocation_pointer (consing_gen) = plug + len; generation_allocation_limit (consing_gen) = heap_segment_plan_allocated (seg); set_allocator_next_pin (consing_gen); } } mark_stack_large_bos++; } } #endif // HOST_64BIT generation_plan_allocation_start (gen) = allocate_in_condemned_generations (consing_gen, Align (min_obj_size), -1); generation_plan_allocation_start_size (gen) = Align (min_obj_size); size_t allocation_left = (size_t)(generation_allocation_limit (consing_gen) - generation_allocation_pointer (consing_gen)); if (next_plug_to_allocate) { size_t dist_to_next_plug = (size_t)(next_plug_to_allocate - generation_allocation_pointer (consing_gen)); if (allocation_left > dist_to_next_plug) { allocation_left = dist_to_next_plug; } } if (allocation_left < Align (min_obj_size)) { generation_plan_allocation_start_size (gen) += allocation_left; generation_allocation_pointer (consing_gen) += allocation_left; } dprintf (2, ("plan alloc gen%d(%Ix) start at %Ix (ptr: %Ix, limit: %Ix, next: %Ix)", gen->gen_num, generation_plan_allocation_start (gen), generation_plan_allocation_start_size (gen), generation_allocation_pointer (consing_gen), generation_allocation_limit (consing_gen), next_plug_to_allocate)); } void gc_heap::realloc_plan_generation_start (generation* gen, generation* consing_gen) { BOOL adjacentp = FALSE; generation_plan_allocation_start (gen) = allocate_in_expanded_heap (consing_gen, Align(min_obj_size), adjacentp, 0, #ifdef SHORT_PLUGS FALSE, NULL, #endif //SHORT_PLUGS FALSE, -1 REQD_ALIGN_AND_OFFSET_ARG); generation_plan_allocation_start_size (gen) = Align (min_obj_size); size_t allocation_left = (size_t)(generation_allocation_limit (consing_gen) - generation_allocation_pointer (consing_gen)); if ((allocation_left < Align (min_obj_size)) && (generation_allocation_limit (consing_gen)!=heap_segment_plan_allocated (generation_allocation_segment (consing_gen)))) { generation_plan_allocation_start_size (gen) += allocation_left; generation_allocation_pointer (consing_gen) += allocation_left; } dprintf (1, ("plan re-alloc gen%d start at %Ix (ptr: %Ix, limit: %Ix)", gen->gen_num, generation_plan_allocation_start (consing_gen), generation_allocation_pointer (consing_gen), generation_allocation_limit (consing_gen))); } void gc_heap::plan_generation_starts (generation*& consing_gen) { //make sure that every generation has a planned allocation start int gen_number = settings.condemned_generation; while (gen_number >= 0) { if (gen_number < max_generation) { consing_gen = ensure_ephemeral_heap_segment (consing_gen); } generation* gen = generation_of (gen_number); if (0 == generation_plan_allocation_start (gen)) { plan_generation_start (gen, consing_gen, 0); assert (generation_plan_allocation_start (gen)); } gen_number--; } // now we know 
the planned allocation size heap_segment_plan_allocated (ephemeral_heap_segment) = generation_allocation_pointer (consing_gen); } void gc_heap::advance_pins_for_demotion (generation* gen) { uint8_t* original_youngest_start = generation_allocation_start (youngest_generation); heap_segment* seg = ephemeral_heap_segment; if ((!(pinned_plug_que_empty_p()))) { size_t gen1_pinned_promoted = generation_pinned_allocation_compact_size (generation_of (max_generation)); size_t gen1_pins_left = dd_pinned_survived_size (dynamic_data_of (max_generation - 1)) - gen1_pinned_promoted; size_t total_space_to_skip = last_gen1_pin_end - generation_allocation_pointer (gen); float pin_frag_ratio = (float)gen1_pins_left / (float)total_space_to_skip; float pin_surv_ratio = (float)gen1_pins_left / (float)(dd_survived_size (dynamic_data_of (max_generation - 1))); if ((pin_frag_ratio > 0.15) && (pin_surv_ratio > 0.30)) { while (!pinned_plug_que_empty_p() && (pinned_plug (oldest_pin()) < original_youngest_start)) { size_t entry = deque_pinned_plug(); size_t len = pinned_len (pinned_plug_of (entry)); uint8_t* plug = pinned_plug (pinned_plug_of(entry)); pinned_len (pinned_plug_of (entry)) = plug - generation_allocation_pointer (gen); assert(mark_stack_array[entry].len == 0 || mark_stack_array[entry].len >= Align(min_obj_size)); generation_allocation_pointer (gen) = plug + len; generation_allocation_limit (gen) = heap_segment_plan_allocated (seg); set_allocator_next_pin (gen); //Add the size of the pinned plug to the right pinned allocations //find out which gen this pinned plug came from int frgn = object_gennum (plug); if ((frgn != (int)max_generation) && settings.promotion) { int togn = object_gennum_plan (plug); generation_pinned_allocation_sweep_size ((generation_of (frgn +1))) += len; if (frgn < togn) { generation_pinned_allocation_compact_size (generation_of (togn)) += len; } } dprintf (2, ("skipping gap %d, pin %Ix (%Id)", pinned_len (pinned_plug_of (entry)), plug, len)); } } dprintf (2, ("ad_p_d: PL: %Id, SL: %Id, pfr: %d, psr: %d", gen1_pins_left, total_space_to_skip, (int)(pin_frag_ratio*100), (int)(pin_surv_ratio*100))); } } void gc_heap::process_ephemeral_boundaries (uint8_t* x, int& active_new_gen_number, int& active_old_gen_number, generation*& consing_gen, BOOL& allocate_in_condemned) { retry: if ((active_old_gen_number > 0) && (x >= generation_allocation_start (generation_of (active_old_gen_number - 1)))) { dprintf (2, ("crossing gen%d, x is %Ix", active_old_gen_number - 1, x)); if (!pinned_plug_que_empty_p()) { dprintf (2, ("oldest pin: %Ix(%Id)", pinned_plug (oldest_pin()), (x - pinned_plug (oldest_pin())))); } if (active_old_gen_number <= (settings.promotion ? (max_generation - 1) : max_generation)) { active_new_gen_number--; } active_old_gen_number--; assert ((!settings.promotion) || (active_new_gen_number>0)); if (active_new_gen_number == (max_generation - 1)) { #ifdef FREE_USAGE_STATS if (settings.condemned_generation == max_generation) { // We need to do this before we skip the rest of the pinned plugs. generation* gen_2 = generation_of (max_generation); generation* gen_1 = generation_of (max_generation - 1); size_t total_num_pinned_free_spaces_left = 0; // We are about to allocate gen1, check to see how efficient fitting in gen2 pinned free spaces is. 
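            // The loop below walks the power-of-two bucketed free-space statistics
            // (going by the dprintf, bucket j appears to correspond to sizes around
            // 2^(j+10)), totals the gen2 pinned free spaces still unused, and then
            // reports how much of the gen2 pinned free space was actually filled:
            //   efficiency = allocated_in_pinned_free / (allocated_in_pinned_free + pinned_free_obj_space)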
for (int j = 0; j < NUM_GEN_POWER2; j++) { dprintf (1, ("[h%d][#%Id]2^%d: current: %Id, S: 2: %Id, 1: %Id(%Id)", heap_number, settings.gc_index, (j + 10), gen_2->gen_current_pinned_free_spaces[j], gen_2->gen_plugs[j], gen_1->gen_plugs[j], (gen_2->gen_plugs[j] + gen_1->gen_plugs[j]))); total_num_pinned_free_spaces_left += gen_2->gen_current_pinned_free_spaces[j]; } float pinned_free_list_efficiency = 0; size_t total_pinned_free_space = generation_allocated_in_pinned_free (gen_2) + generation_pinned_free_obj_space (gen_2); if (total_pinned_free_space != 0) { pinned_free_list_efficiency = (float)(generation_allocated_in_pinned_free (gen_2)) / (float)total_pinned_free_space; } dprintf (1, ("[h%d] gen2 allocated %Id bytes with %Id bytes pinned free spaces (effi: %d%%), %Id (%Id) left", heap_number, generation_allocated_in_pinned_free (gen_2), total_pinned_free_space, (int)(pinned_free_list_efficiency * 100), generation_pinned_free_obj_space (gen_2), total_num_pinned_free_spaces_left)); } #endif //FREE_USAGE_STATS //Go past all of the pinned plugs for this generation. while (!pinned_plug_que_empty_p() && (!in_range_for_segment ((pinned_plug (oldest_pin())), ephemeral_heap_segment))) { size_t entry = deque_pinned_plug(); mark* m = pinned_plug_of (entry); uint8_t* plug = pinned_plug (m); size_t len = pinned_len (m); // detect pinned block in different segment (later) than // allocation segment, skip those until the oldest pin is in the ephemeral seg. // adjust the allocation segment along the way (at the end it will // be the ephemeral segment. heap_segment* nseg = heap_segment_in_range (generation_allocation_segment (consing_gen)); PREFIX_ASSUME(nseg != NULL); while (!((plug >= generation_allocation_pointer (consing_gen))&& (plug < heap_segment_allocated (nseg)))) { //adjust the end of the segment to be the end of the plug assert (generation_allocation_pointer (consing_gen)>= heap_segment_mem (nseg)); assert (generation_allocation_pointer (consing_gen)<= heap_segment_committed (nseg)); heap_segment_plan_allocated (nseg) = generation_allocation_pointer (consing_gen); //switch allocation segment nseg = heap_segment_next_rw (nseg); generation_allocation_segment (consing_gen) = nseg; //reset the allocation pointer and limits generation_allocation_pointer (consing_gen) = heap_segment_mem (nseg); } set_new_pin_info (m, generation_allocation_pointer (consing_gen)); assert(pinned_len(m) == 0 || pinned_len(m) >= Align(min_obj_size)); generation_allocation_pointer (consing_gen) = plug + len; generation_allocation_limit (consing_gen) = generation_allocation_pointer (consing_gen); } allocate_in_condemned = TRUE; consing_gen = ensure_ephemeral_heap_segment (consing_gen); } if (active_new_gen_number != max_generation) { if (active_new_gen_number == (max_generation - 1)) { maxgen_pinned_compact_before_advance = generation_pinned_allocation_compact_size (generation_of (max_generation)); if (!demote_gen1_p) advance_pins_for_demotion (consing_gen); } plan_generation_start (generation_of (active_new_gen_number), consing_gen, x); dprintf (2, ("process eph: allocated gen%d start at %Ix", active_new_gen_number, generation_plan_allocation_start (generation_of (active_new_gen_number)))); if ((demotion_low == MAX_PTR) && !pinned_plug_que_empty_p()) { uint8_t* pplug = pinned_plug (oldest_pin()); if (object_gennum (pplug) > 0) { demotion_low = pplug; dprintf (3, ("process eph: dlow->%Ix", demotion_low)); } } assert (generation_plan_allocation_start (generation_of (active_new_gen_number))); } goto retry; } } #endif 
//!USE_REGIONS inline void gc_heap::seg_clear_mark_bits (heap_segment* seg) { uint8_t* o = heap_segment_mem (seg); while (o < heap_segment_allocated (seg)) { if (marked (o)) { clear_marked (o); } o = o + Align (size (o)); } } #ifdef FEATURE_BASICFREEZE void gc_heap::sweep_ro_segments (heap_segment* start_seg) { //go through all of the segment in range and reset the mark bit heap_segment* seg = start_seg; while (seg) { if (heap_segment_read_only_p (seg) && heap_segment_in_range_p (seg)) { #ifdef BACKGROUND_GC if (settings.concurrent) { seg_clear_mark_array_bits_soh (seg); } else { seg_clear_mark_bits (seg); } #else //BACKGROUND_GC seg_clear_mark_bits (seg); #endif //BACKGROUND_GC } seg = heap_segment_next (seg); } } #endif // FEATURE_BASICFREEZE #ifdef FEATURE_LOH_COMPACTION inline BOOL gc_heap::loh_pinned_plug_que_empty_p() { return (loh_pinned_queue_bos == loh_pinned_queue_tos); } void gc_heap::loh_set_allocator_next_pin() { if (!(loh_pinned_plug_que_empty_p())) { mark* oldest_entry = loh_oldest_pin(); uint8_t* plug = pinned_plug (oldest_entry); generation* gen = large_object_generation; if ((plug >= generation_allocation_pointer (gen)) && (plug < generation_allocation_limit (gen))) { generation_allocation_limit (gen) = pinned_plug (oldest_entry); } else assert (!((plug < generation_allocation_pointer (gen)) && (plug >= heap_segment_mem (generation_allocation_segment (gen))))); } } size_t gc_heap::loh_deque_pinned_plug () { size_t m = loh_pinned_queue_bos; loh_pinned_queue_bos++; return m; } inline mark* gc_heap::loh_pinned_plug_of (size_t bos) { return &loh_pinned_queue[bos]; } inline mark* gc_heap::loh_oldest_pin() { return loh_pinned_plug_of (loh_pinned_queue_bos); } // If we can't grow the queue, then don't compact. BOOL gc_heap::loh_enque_pinned_plug (uint8_t* plug, size_t len) { assert(len >= Align(min_obj_size, get_alignment_constant (FALSE))); if (loh_pinned_queue_length <= loh_pinned_queue_tos) { if (!grow_mark_stack (loh_pinned_queue, loh_pinned_queue_length, LOH_PIN_QUEUE_LENGTH)) { return FALSE; } } dprintf (3, (" P: %Ix(%Id)", plug, len)); mark& m = loh_pinned_queue[loh_pinned_queue_tos]; m.first = plug; m.len = len; loh_pinned_queue_tos++; loh_set_allocator_next_pin(); return TRUE; } inline BOOL gc_heap::loh_size_fit_p (size_t size, uint8_t* alloc_pointer, uint8_t* alloc_limit) { dprintf (1235, ("trying to fit %Id(%Id) between %Ix and %Ix (%Id)", size, (2* AlignQword (loh_padding_obj_size) + size), alloc_pointer, alloc_limit, (alloc_limit - alloc_pointer))); return ((alloc_pointer + 2* AlignQword (loh_padding_obj_size) + size) <= alloc_limit); } uint8_t* gc_heap::loh_allocate_in_condemned (size_t size) { generation* gen = large_object_generation; dprintf (1235, ("E: p:%Ix, l:%Ix, s: %Id", generation_allocation_pointer (gen), generation_allocation_limit (gen), size)); retry: { heap_segment* seg = generation_allocation_segment (gen); if (!(loh_size_fit_p (size, generation_allocation_pointer (gen), generation_allocation_limit (gen)))) { if ((!(loh_pinned_plug_que_empty_p()) && (generation_allocation_limit (gen) == pinned_plug (loh_oldest_pin())))) { mark* m = loh_pinned_plug_of (loh_deque_pinned_plug()); size_t len = pinned_len (m); uint8_t* plug = pinned_plug (m); dprintf (1235, ("AIC: %Ix->%Ix(%Id)", generation_allocation_pointer (gen), plug, plug - generation_allocation_pointer (gen))); pinned_len (m) = plug - generation_allocation_pointer (gen); generation_allocation_pointer (gen) = plug + len; generation_allocation_limit (gen) = heap_segment_plan_allocated (seg); 
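            // We have just skipped over the oldest LOH pin: the gap in front of it was
            // recorded in its pinned_len and the allocation window now starts right
            // after the plug, so update the next-pin limit and jump back to retry to
            // re-test the fit.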
loh_set_allocator_next_pin(); dprintf (1235, ("s: p: %Ix, l: %Ix (%Id)", generation_allocation_pointer (gen), generation_allocation_limit (gen), (generation_allocation_limit (gen) - generation_allocation_pointer (gen)))); goto retry; } if (generation_allocation_limit (gen) != heap_segment_plan_allocated (seg)) { generation_allocation_limit (gen) = heap_segment_plan_allocated (seg); dprintf (1235, ("l->pa(%Ix)", generation_allocation_limit (gen))); } else { if (heap_segment_plan_allocated (seg) != heap_segment_committed (seg)) { heap_segment_plan_allocated (seg) = heap_segment_committed (seg); generation_allocation_limit (gen) = heap_segment_plan_allocated (seg); dprintf (1235, ("l->c(%Ix)", generation_allocation_limit (gen))); } else { if (loh_size_fit_p (size, generation_allocation_pointer (gen), heap_segment_reserved (seg)) && (grow_heap_segment (seg, (generation_allocation_pointer (gen) + size + 2* AlignQword (loh_padding_obj_size))))) { dprintf (1235, ("growing seg from %Ix to %Ix\n", heap_segment_committed (seg), (generation_allocation_pointer (gen) + size))); heap_segment_plan_allocated (seg) = heap_segment_committed (seg); generation_allocation_limit (gen) = heap_segment_plan_allocated (seg); dprintf (1235, ("g: p: %Ix, l: %Ix (%Id)", generation_allocation_pointer (gen), generation_allocation_limit (gen), (generation_allocation_limit (gen) - generation_allocation_pointer (gen)))); } else { heap_segment* next_seg = heap_segment_next (seg); assert (generation_allocation_pointer (gen)>= heap_segment_mem (seg)); // Verify that all pinned plugs for this segment are consumed if (!loh_pinned_plug_que_empty_p() && ((pinned_plug (loh_oldest_pin()) < heap_segment_allocated (seg)) && (pinned_plug (loh_oldest_pin()) >= generation_allocation_pointer (gen)))) { LOG((LF_GC, LL_INFO10, "remaining pinned plug %Ix while leaving segment on allocation", pinned_plug (loh_oldest_pin()))); dprintf (1, ("queue empty: %d", loh_pinned_plug_que_empty_p())); FATAL_GC_ERROR(); } assert (generation_allocation_pointer (gen)>= heap_segment_mem (seg)); assert (generation_allocation_pointer (gen)<= heap_segment_committed (seg)); heap_segment_plan_allocated (seg) = generation_allocation_pointer (gen); if (next_seg) { // for LOH do we want to try starting from the first LOH every time though? 
generation_allocation_segment (gen) = next_seg; generation_allocation_pointer (gen) = heap_segment_mem (next_seg); generation_allocation_limit (gen) = generation_allocation_pointer (gen); dprintf (1235, ("n: p: %Ix, l: %Ix (%Id)", generation_allocation_pointer (gen), generation_allocation_limit (gen), (generation_allocation_limit (gen) - generation_allocation_pointer (gen)))); } else { dprintf (1, ("We ran out of space compacting, shouldn't happen")); FATAL_GC_ERROR(); } } } } loh_set_allocator_next_pin(); dprintf (1235, ("r: p: %Ix, l: %Ix (%Id)", generation_allocation_pointer (gen), generation_allocation_limit (gen), (generation_allocation_limit (gen) - generation_allocation_pointer (gen)))); goto retry; } } { assert (generation_allocation_pointer (gen)>= heap_segment_mem (generation_allocation_segment (gen))); uint8_t* result = generation_allocation_pointer (gen); size_t loh_pad = AlignQword (loh_padding_obj_size); generation_allocation_pointer (gen) += size + loh_pad; assert (generation_allocation_pointer (gen) <= generation_allocation_limit (gen)); dprintf (1235, ("p: %Ix, l: %Ix (%Id)", generation_allocation_pointer (gen), generation_allocation_limit (gen), (generation_allocation_limit (gen) - generation_allocation_pointer (gen)))); assert (result + loh_pad); return result + loh_pad; } } BOOL gc_heap::loh_compaction_requested() { // If hard limit is specified GC will automatically decide if LOH needs to be compacted. return (loh_compaction_always_p || (loh_compaction_mode != loh_compaction_default)); } inline void gc_heap::check_loh_compact_mode (BOOL all_heaps_compacted_p) { if (settings.loh_compaction && (loh_compaction_mode == loh_compaction_once)) { if (all_heaps_compacted_p) { // If the compaction mode says to compact once and we are going to compact LOH, // we need to revert it back to no compaction. loh_compaction_mode = loh_compaction_default; } } } BOOL gc_heap::plan_loh() { #ifdef FEATURE_EVENT_TRACE uint64_t start_time, end_time; if (informational_event_enabled_p) { memset (loh_compact_info, 0, (sizeof (etw_loh_compact_info) * get_num_heaps())); start_time = GetHighPrecisionTimeStamp(); } #endif //FEATURE_EVENT_TRACE if (!loh_pinned_queue) { loh_pinned_queue = new (nothrow) (mark [LOH_PIN_QUEUE_LENGTH]); if (!loh_pinned_queue) { dprintf (1, ("Cannot allocate the LOH pinned queue (%Id bytes), no compaction", LOH_PIN_QUEUE_LENGTH * sizeof (mark))); return FALSE; } loh_pinned_queue_length = LOH_PIN_QUEUE_LENGTH; } if (heap_number == 0) loh_pinned_queue_decay = LOH_PIN_DECAY; loh_pinned_queue_tos = 0; loh_pinned_queue_bos = 0; generation* gen = large_object_generation; heap_segment* start_seg = heap_segment_rw (generation_start_segment (gen)); PREFIX_ASSUME(start_seg != NULL); heap_segment* seg = start_seg; uint8_t* o = get_uoh_start_object (seg, gen); dprintf (1235, ("before GC LOH size: %Id, free list: %Id, free obj: %Id\n", generation_size (loh_generation), generation_free_list_space (gen), generation_free_obj_space (gen))); while (seg) { heap_segment_plan_allocated (seg) = heap_segment_mem (seg); seg = heap_segment_next (seg); } seg = start_seg; // We don't need to ever realloc gen3 start so don't touch it. 
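    // Prime the consing state for planning: every segment's plan_allocated was reset
    // to its start in the loop above; now the first segment's plan is set to the first
    // object to be planned (o), and the allocation pointer/limit/segment are set up so
    // loh_allocate_in_condemned can run a simple bump-plan over the condemned LOH.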
heap_segment_plan_allocated (seg) = o; generation_allocation_pointer (gen) = o; generation_allocation_limit (gen) = generation_allocation_pointer (gen); generation_allocation_segment (gen) = start_seg; uint8_t* free_space_start = o; uint8_t* free_space_end = o; uint8_t* new_address = 0; while (1) { if (o >= heap_segment_allocated (seg)) { seg = heap_segment_next (seg); if (seg == 0) { break; } o = heap_segment_mem (seg); } if (marked (o)) { free_space_end = o; size_t size = AlignQword (size (o)); dprintf (1235, ("%Ix(%Id) M", o, size)); if (pinned (o)) { // We don't clear the pinned bit yet so we can check in // compact phase how big a free object we should allocate // in front of the pinned object. We use the reloc address // field to store this. if (!loh_enque_pinned_plug (o, size)) { return FALSE; } new_address = o; } else { new_address = loh_allocate_in_condemned (size); } loh_set_node_relocation_distance (o, (new_address - o)); dprintf (1235, ("lobj %Ix-%Ix -> %Ix-%Ix (%Id)", o, (o + size), new_address, (new_address + size), (new_address - o))); o = o + size; free_space_start = o; if (o < heap_segment_allocated (seg)) { assert (!marked (o)); } } else { while (o < heap_segment_allocated (seg) && !marked (o)) { dprintf (1235, ("%Ix(%Id) F (%d)", o, AlignQword (size (o)), ((method_table (o) == g_gc_pFreeObjectMethodTable) ? 1 : 0))); o = o + AlignQword (size (o)); } } } while (!loh_pinned_plug_que_empty_p()) { mark* m = loh_pinned_plug_of (loh_deque_pinned_plug()); size_t len = pinned_len (m); uint8_t* plug = pinned_plug (m); // detect pinned block in different segment (later) than // allocation segment heap_segment* nseg = heap_segment_rw (generation_allocation_segment (gen)); while ((plug < generation_allocation_pointer (gen)) || (plug >= heap_segment_allocated (nseg))) { assert ((plug < heap_segment_mem (nseg)) || (plug > heap_segment_reserved (nseg))); //adjust the end of the segment to be the end of the plug assert (generation_allocation_pointer (gen)>= heap_segment_mem (nseg)); assert (generation_allocation_pointer (gen)<= heap_segment_committed (nseg)); heap_segment_plan_allocated (nseg) = generation_allocation_pointer (gen); //switch allocation segment nseg = heap_segment_next_rw (nseg); generation_allocation_segment (gen) = nseg; //reset the allocation pointer and limits generation_allocation_pointer (gen) = heap_segment_mem (nseg); } dprintf (1235, ("SP: %Ix->%Ix(%Id)", generation_allocation_pointer (gen), plug, plug - generation_allocation_pointer (gen))); pinned_len (m) = plug - generation_allocation_pointer (gen); generation_allocation_pointer (gen) = plug + len; } heap_segment_plan_allocated (generation_allocation_segment (gen)) = generation_allocation_pointer (gen); generation_allocation_pointer (gen) = 0; generation_allocation_limit (gen) = 0; #ifdef FEATURE_EVENT_TRACE if (informational_event_enabled_p) { end_time = GetHighPrecisionTimeStamp(); loh_compact_info[heap_number].time_plan = limit_time_to_uint32 (end_time - start_time); } #endif //FEATURE_EVENT_TRACE return TRUE; } void gc_heap::compact_loh() { assert (loh_compaction_requested() || heap_hard_limit || conserve_mem_setting); #ifdef FEATURE_EVENT_TRACE uint64_t start_time, end_time; if (informational_event_enabled_p) { start_time = GetHighPrecisionTimeStamp(); } #endif //FEATURE_EVENT_TRACE generation* gen = large_object_generation; heap_segment* start_seg = heap_segment_rw (generation_start_segment (gen)); PREFIX_ASSUME(start_seg != NULL); heap_segment* seg = start_seg; heap_segment* prev_seg = 0; uint8_t* o = 
get_uoh_start_object (seg, gen); // We don't need to ever realloc gen3 start so don't touch it. uint8_t* free_space_start = o; uint8_t* free_space_end = o; generation_allocator (gen)->clear(); generation_free_list_space (gen) = 0; generation_free_obj_space (gen) = 0; loh_pinned_queue_bos = 0; while (1) { if (o >= heap_segment_allocated (seg)) { heap_segment* next_seg = heap_segment_next (seg); // REGIONS TODO: for regions we can get rid of the start_seg. Just need // to update start region accordingly. if ((heap_segment_plan_allocated (seg) == heap_segment_mem (seg)) && (seg != start_seg) && !heap_segment_read_only_p (seg)) { dprintf (3, ("Preparing empty large segment %Ix", (size_t)seg)); assert (prev_seg); heap_segment_next (prev_seg) = next_seg; heap_segment_next (seg) = freeable_uoh_segment; freeable_uoh_segment = seg; #ifdef USE_REGIONS update_start_tail_regions (gen, seg, prev_seg, next_seg); #endif //USE_REGIONS } else { if (!heap_segment_read_only_p (seg)) { // We grew the segment to accommodate allocations. if (heap_segment_plan_allocated (seg) > heap_segment_allocated (seg)) { if ((heap_segment_plan_allocated (seg) - plug_skew) > heap_segment_used (seg)) { heap_segment_used (seg) = heap_segment_plan_allocated (seg) - plug_skew; } } heap_segment_allocated (seg) = heap_segment_plan_allocated (seg); dprintf (3, ("Trimming seg to %Ix[", heap_segment_allocated (seg))); decommit_heap_segment_pages (seg, 0); dprintf (1236, ("CLOH: seg: %Ix, alloc: %Ix, used: %Ix, committed: %Ix", seg, heap_segment_allocated (seg), heap_segment_used (seg), heap_segment_committed (seg))); //heap_segment_used (seg) = heap_segment_allocated (seg) - plug_skew; dprintf (1236, ("CLOH: used is set to %Ix", heap_segment_used (seg))); } prev_seg = seg; } seg = next_seg; if (seg == 0) break; else { o = heap_segment_mem (seg); } } if (marked (o)) { free_space_end = o; size_t size = AlignQword (size (o)); size_t loh_pad; uint8_t* reloc = o; clear_marked (o); if (pinned (o)) { // We are relying on the fact the pinned objects are always looked at in the same order // in plan phase and in compact phase. 
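// Pins are dequeued here in the same order they were enqueued during plan_loh; the
// length saved with each pin entry is the amount of free space to thread in front
// of the pinned plug.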
mark* m = loh_pinned_plug_of (loh_deque_pinned_plug()); uint8_t* plug = pinned_plug (m); assert (plug == o); loh_pad = pinned_len (m); clear_pinned (o); } else { loh_pad = AlignQword (loh_padding_obj_size); reloc += loh_node_relocation_distance (o); gcmemcopy (reloc, o, size, TRUE); } thread_gap ((reloc - loh_pad), loh_pad, gen); o = o + size; free_space_start = o; if (o < heap_segment_allocated (seg)) { assert (!marked (o)); } } else { while (o < heap_segment_allocated (seg) && !marked (o)) { o = o + AlignQword (size (o)); } } } #ifdef FEATURE_EVENT_TRACE if (informational_event_enabled_p) { end_time = GetHighPrecisionTimeStamp(); loh_compact_info[heap_number].time_compact = limit_time_to_uint32 (end_time - start_time); } #endif //FEATURE_EVENT_TRACE assert (loh_pinned_plug_que_empty_p()); dprintf (1235, ("after GC LOH size: %Id, free list: %Id, free obj: %Id\n\n", generation_size (loh_generation), generation_free_list_space (gen), generation_free_obj_space (gen))); } #ifdef FEATURE_EVENT_TRACE inline void gc_heap::loh_reloc_survivor_helper (uint8_t** pval, size_t& total_refs, size_t& zero_refs) { uint8_t* val = *pval; if (!val) zero_refs++; total_refs++; reloc_survivor_helper (pval); } #endif //FEATURE_EVENT_TRACE void gc_heap::relocate_in_loh_compact() { generation* gen = large_object_generation; heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); uint8_t* o = get_uoh_start_object (seg, gen); #ifdef FEATURE_EVENT_TRACE size_t total_refs = 0; size_t zero_refs = 0; uint64_t start_time, end_time; if (informational_event_enabled_p) { start_time = GetHighPrecisionTimeStamp(); } #endif //FEATURE_EVENT_TRACE while (1) { if (o >= heap_segment_allocated (seg)) { seg = heap_segment_next (seg); if (seg == 0) { break; } o = heap_segment_mem (seg); } if (marked (o)) { size_t size = AlignQword (size (o)); check_class_object_demotion (o); if (contain_pointers (o)) { #ifdef FEATURE_EVENT_TRACE if (informational_event_enabled_p) { go_through_object_nostart (method_table (o), o, size(o), pval, { loh_reloc_survivor_helper (pval, total_refs, zero_refs); }); } else #endif //FEATURE_EVENT_TRACE { go_through_object_nostart (method_table (o), o, size(o), pval, { reloc_survivor_helper (pval); }); } } o = o + size; if (o < heap_segment_allocated (seg)) { assert (!marked (o)); } } else { while (o < heap_segment_allocated (seg) && !marked (o)) { o = o + AlignQword (size (o)); } } } #ifdef FEATURE_EVENT_TRACE if (informational_event_enabled_p) { end_time = GetHighPrecisionTimeStamp(); loh_compact_info[heap_number].time_relocate = limit_time_to_uint32 (end_time - start_time); loh_compact_info[heap_number].total_refs = total_refs; loh_compact_info[heap_number].zero_refs = zero_refs; } #endif //FEATURE_EVENT_TRACE dprintf (1235, ("after GC LOH size: %Id, free list: %Id, free obj: %Id\n\n", generation_size (loh_generation), generation_free_list_space (gen), generation_free_obj_space (gen))); } void gc_heap::walk_relocation_for_loh (void* profiling_context, record_surv_fn fn) { generation* gen = large_object_generation; heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); uint8_t* o = get_uoh_start_object (seg, gen); while (1) { if (o >= heap_segment_allocated (seg)) { seg = heap_segment_next (seg); if (seg == 0) { break; } o = heap_segment_mem (seg); } if (marked (o)) { size_t size = AlignQword (size (o)); ptrdiff_t reloc = loh_node_relocation_distance (o); STRESS_LOG_PLUG_MOVE(o, (o + size), -reloc); fn (o, (o + size), reloc, profiling_context, !!settings.compaction, false); o = o + 
size; if (o < heap_segment_allocated (seg)) { assert (!marked (o)); } } else { while (o < heap_segment_allocated (seg) && !marked (o)) { o = o + AlignQword (size (o)); } } } } BOOL gc_heap::loh_object_p (uint8_t* o) { #ifdef MULTIPLE_HEAPS gc_heap* hp = gc_heap::g_heaps [0]; int brick_entry = hp->brick_table[hp->brick_of (o)]; #else //MULTIPLE_HEAPS int brick_entry = brick_table[brick_of (o)]; #endif //MULTIPLE_HEAPS return (brick_entry == 0); } #endif //FEATURE_LOH_COMPACTION void gc_heap::convert_to_pinned_plug (BOOL& last_npinned_plug_p, BOOL& last_pinned_plug_p, BOOL& pinned_plug_p, size_t ps, size_t& artificial_pinned_size) { last_npinned_plug_p = FALSE; last_pinned_plug_p = TRUE; pinned_plug_p = TRUE; artificial_pinned_size = ps; } // Because we have the artificial pinning, we can't guarantee that pinned and npinned // plugs are always interleaved. void gc_heap::store_plug_gap_info (uint8_t* plug_start, uint8_t* plug_end, BOOL& last_npinned_plug_p, BOOL& last_pinned_plug_p, uint8_t*& last_pinned_plug, BOOL& pinned_plug_p, uint8_t* last_object_in_last_plug, BOOL& merge_with_last_pin_p, // this is only for verification purpose size_t last_plug_len) { UNREFERENCED_PARAMETER(last_plug_len); if (!last_npinned_plug_p && !last_pinned_plug_p) { //dprintf (3, ("last full plug end: %Ix, full plug start: %Ix", plug_end, plug_start)); dprintf (3, ("Free: %Ix", (plug_start - plug_end))); assert ((plug_start == plug_end) || ((size_t)(plug_start - plug_end) >= Align (min_obj_size))); set_gap_size (plug_start, plug_start - plug_end); } if (pinned (plug_start)) { BOOL save_pre_plug_info_p = FALSE; if (last_npinned_plug_p || last_pinned_plug_p) { //if (last_plug_len == Align (min_obj_size)) //{ // dprintf (3, ("debugging only - last npinned plug is min, check to see if it's correct")); // GCToOSInterface::DebugBreak(); //} save_pre_plug_info_p = TRUE; } pinned_plug_p = TRUE; last_npinned_plug_p = FALSE; if (last_pinned_plug_p) { dprintf (3, ("last plug %Ix was also pinned, should merge", last_pinned_plug)); merge_with_last_pin_p = TRUE; } else { last_pinned_plug_p = TRUE; last_pinned_plug = plug_start; enque_pinned_plug (last_pinned_plug, save_pre_plug_info_p, last_object_in_last_plug); if (save_pre_plug_info_p) { #ifdef DOUBLY_LINKED_FL if (last_object_in_last_plug == generation_last_free_list_allocated(generation_of(max_generation))) { saved_pinned_plug_index = mark_stack_tos; } #endif //DOUBLY_LINKED_FL set_gap_size (plug_start, sizeof (gap_reloc_pair)); } } } else { if (last_pinned_plug_p) { //if (Align (last_plug_len) < min_pre_pin_obj_size) //{ // dprintf (3, ("debugging only - last pinned plug is min, check to see if it's correct")); // GCToOSInterface::DebugBreak(); //} save_post_plug_info (last_pinned_plug, last_object_in_last_plug, plug_start); set_gap_size (plug_start, sizeof (gap_reloc_pair)); verify_pins_with_post_plug_info("after saving post plug info"); } last_npinned_plug_p = TRUE; last_pinned_plug_p = FALSE; } } void gc_heap::record_interesting_data_point (interesting_data_point idp) { #ifdef GC_CONFIG_DRIVEN (interesting_data_per_gc[idp])++; #else UNREFERENCED_PARAMETER(idp); #endif //GC_CONFIG_DRIVEN } #ifdef USE_REGIONS void gc_heap::skip_pins_in_alloc_region (generation* consing_gen, int plan_gen_num) { heap_segment* alloc_region = generation_allocation_segment (consing_gen); while (!pinned_plug_que_empty_p()) { uint8_t* oldest_plug = pinned_plug (oldest_pin()); if ((oldest_plug >= generation_allocation_pointer (consing_gen)) && (oldest_plug < heap_segment_allocated 
(alloc_region))) { mark* m = pinned_plug_of (deque_pinned_plug()); uint8_t* plug = pinned_plug (m); size_t len = pinned_len (m); set_new_pin_info (m, generation_allocation_pointer (consing_gen)); dprintf (REGIONS_LOG, ("pin %Ix b: %Ix->%Ix", plug, brick_of (plug), (size_t)(brick_table[brick_of (plug)]))); generation_allocation_pointer (consing_gen) = plug + len; } else { // Exit when we detect the first pin that's not on the alloc seg anymore. break; } } dprintf (REGIONS_LOG, ("finished with alloc region %Ix, (%s) plan gen -> %d", heap_segment_mem (alloc_region), (heap_segment_swept_in_plan (alloc_region) ? "SIP" : "non SIP"), (heap_segment_swept_in_plan (alloc_region) ? heap_segment_plan_gen_num (alloc_region) : plan_gen_num))); set_region_plan_gen_num_sip (alloc_region, plan_gen_num); heap_segment_plan_allocated (alloc_region) = generation_allocation_pointer (consing_gen); } void gc_heap::decide_on_demotion_pin_surv (heap_segment* region) { int new_gen_num = 0; if (settings.promotion) { // If this region doesn't have much pinned surv left, we demote it; otherwise the region // will be promoted like normal. size_t basic_region_size = (size_t)1 << min_segment_size_shr; if ((int)(((double)heap_segment_pinned_survived (region) * 100.0) / (double)basic_region_size) >= demotion_pinned_ratio_th) { new_gen_num = get_plan_gen_num (heap_segment_gen_num (region)); } } set_region_plan_gen_num_sip (region, new_gen_num); } // If the next plan gen number is different, since different generations cannot share the same // region, we need to get a new alloc region and skip all remaining pins in the alloc region if // any. void gc_heap::process_last_np_surv_region (generation* consing_gen, int current_plan_gen_num, int next_plan_gen_num) { heap_segment* alloc_region = generation_allocation_segment (consing_gen); //assert (in_range_for_segment (generation_allocation_pointer (consing_gen), alloc_region)); // I'm not using in_range_for_segment here because alloc pointer/limit can be exactly the same // as reserved. size_fit_p in allocate_in_condemned_generations can be used to fit the exact // size of a plug at the end of the segment which makes alloc pointer/limit both reserved // on exit of that method. uint8_t* consing_gen_alloc_ptr = generation_allocation_pointer (consing_gen); assert ((consing_gen_alloc_ptr >= heap_segment_mem (alloc_region)) && (consing_gen_alloc_ptr <= heap_segment_reserved (alloc_region))); dprintf (REGIONS_LOG, ("h%d next need to plan gen%d, consing alloc region: %Ix, ptr: %Ix(consing gen: %d)", heap_number, next_plan_gen_num, heap_segment_mem (alloc_region), generation_allocation_pointer (consing_gen), consing_gen->gen_num)); if (current_plan_gen_num != next_plan_gen_num) { // If we haven't needed to consume this alloc region at all, we can use it to allocate the new // gen. if (generation_allocation_pointer (consing_gen) == heap_segment_mem (alloc_region)) { dprintf (REGIONS_LOG, ("h%d alloc region %Ix unused, using it to plan %d", heap_number, heap_segment_mem (alloc_region), next_plan_gen_num)); return; } // skip all the pins in this region since we cannot use it to plan the next gen. 
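// Once the pins are skipped, pick the region the next plan generation will be allocated
// into: the next region of this generation if there is one, otherwise the start region
// of the next younger generation, and for gen0 a brand new region (falling back to
// special sweep if no region can be obtained).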
skip_pins_in_alloc_region (consing_gen, current_plan_gen_num); heap_segment* next_region = heap_segment_next (alloc_region); if (!next_region) { int gen_num = heap_segment_gen_num (alloc_region); if (gen_num > 0) { next_region = generation_start_segment (generation_of (gen_num - 1)); dprintf (REGIONS_LOG, ("h%d consing switching to next gen%d seg %Ix", heap_number, heap_segment_gen_num (next_region), heap_segment_mem (next_region))); } else { if (settings.promotion) { assert (next_plan_gen_num == 0); next_region = get_new_region (0); if (next_region) { dprintf (REGIONS_LOG, ("h%d getting a new region for gen0 plan start seg to %Ix", heap_number, heap_segment_mem (next_region))); } else { dprintf (REGIONS_LOG, ("h%d couldn't get a region to plan gen0, special sweep on", heap_number)); special_sweep_p = true; } } else { assert (!"ran out of regions for non promotion case??"); } } } else { dprintf (REGIONS_LOG, ("h%d consing switching to next seg %Ix in gen%d to alloc in", heap_number, heap_segment_mem (next_region), heap_segment_gen_num (next_region))); } if (next_region) { init_alloc_info (consing_gen, next_region); dprintf (REGIONS_LOG, ("h%d consing(%d) alloc seg: %Ix(%Ix, %Ix), ptr: %Ix, planning gen%d", heap_number, consing_gen->gen_num, heap_segment_mem (generation_allocation_segment (consing_gen)), heap_segment_allocated (generation_allocation_segment (consing_gen)), heap_segment_plan_allocated (generation_allocation_segment (consing_gen)), generation_allocation_pointer (consing_gen), next_plan_gen_num)); } else { assert (special_sweep_p); } } } void gc_heap::process_remaining_regions (int current_plan_gen_num, generation* consing_gen) { assert ((current_plan_gen_num == 0) || (!settings.promotion && (current_plan_gen_num == -1))); if (special_sweep_p) { assert (pinned_plug_que_empty_p()); } dprintf (REGIONS_LOG, ("h%d PRR: plan %d: consing alloc seg: %Ix, ptr: %Ix", heap_number, current_plan_gen_num, heap_segment_mem (generation_allocation_segment (consing_gen)), generation_allocation_pointer (consing_gen))); if (current_plan_gen_num == -1) { assert (!settings.promotion); current_plan_gen_num = 0; } while (!pinned_plug_que_empty_p()) { uint8_t* oldest_plug = pinned_plug (oldest_pin()); // detect pinned block in segments without pins heap_segment* nseg = heap_segment_rw (generation_allocation_segment (consing_gen)); dprintf (3, ("h%d oldest pin: %Ix, consing alloc %Ix, ptr %Ix, limit %Ix", heap_number, oldest_plug, heap_segment_mem (nseg), generation_allocation_pointer (consing_gen), generation_allocation_limit (consing_gen))); while ((oldest_plug < generation_allocation_pointer (consing_gen)) || (oldest_plug >= heap_segment_allocated (nseg))) { assert ((oldest_plug < heap_segment_mem (nseg)) || (oldest_plug > heap_segment_reserved (nseg))); assert (generation_allocation_pointer (consing_gen)>= heap_segment_mem (nseg)); assert (generation_allocation_pointer (consing_gen)<= heap_segment_committed (nseg)); dprintf (3, ("h%d PRR: in loop, seg %Ix pa %Ix -> alloc ptr %Ix, plan gen %d->%d", heap_number, heap_segment_mem (nseg), heap_segment_plan_allocated (nseg), generation_allocation_pointer (consing_gen), heap_segment_plan_gen_num (nseg), current_plan_gen_num)); if (!heap_segment_swept_in_plan (nseg)) { heap_segment_plan_allocated (nseg) = generation_allocation_pointer (consing_gen); } decide_on_demotion_pin_surv (nseg); heap_segment* next_seg = heap_segment_next_non_sip (nseg); if ((next_seg == 0) && (heap_segment_gen_num (nseg) > 0)) { next_seg = generation_start_segment (generation_of 
(heap_segment_gen_num (nseg) - 1)); dprintf (3, ("h%d PRR: switching to next gen%d start %Ix", heap_number, heap_segment_gen_num (next_seg), (size_t)next_seg)); } assert (next_seg != 0); nseg = next_seg; generation_allocation_segment (consing_gen) = nseg; generation_allocation_pointer (consing_gen) = heap_segment_mem (nseg); } mark* m = pinned_plug_of (deque_pinned_plug()); uint8_t* plug = pinned_plug (m); size_t len = pinned_len (m); set_new_pin_info (m, generation_allocation_pointer (consing_gen)); size_t free_size = pinned_len (m); update_planned_gen0_free_space (free_size, plug); dprintf (2, ("h%d plug %Ix-%Ix(%Id), free space before %Ix-%Ix(%Id)", heap_number, plug, (plug + len), len, generation_allocation_pointer (consing_gen), plug, free_size)); generation_allocation_pointer (consing_gen) = plug + len; generation_allocation_limit (consing_gen) = generation_allocation_pointer (consing_gen); } heap_segment* current_region = generation_allocation_segment (consing_gen); if (special_sweep_p) { assert (heap_segment_next_rw (current_region) == 0); return; } set_region_plan_gen_num_sip (current_region, current_plan_gen_num); if (!heap_segment_swept_in_plan (current_region)) { heap_segment_plan_allocated (current_region) = generation_allocation_pointer (consing_gen); dprintf (REGIONS_LOG, ("h%d setting alloc seg %Ix plan alloc to %Ix", heap_number, heap_segment_mem (current_region), heap_segment_plan_allocated (current_region))); } heap_segment* region_no_pins = heap_segment_next (current_region); int region_no_pins_gen_num = heap_segment_gen_num (current_region); do { region_no_pins = heap_segment_non_sip (region_no_pins); if (region_no_pins) { set_region_plan_gen_num (region_no_pins, current_plan_gen_num); heap_segment_plan_allocated (region_no_pins) = heap_segment_mem (region_no_pins); dprintf (REGIONS_LOG, ("h%d setting seg %Ix(no pins) plan gen to 0, plan alloc to %Ix", heap_number, heap_segment_mem (region_no_pins), heap_segment_plan_allocated (region_no_pins))); region_no_pins = heap_segment_next (region_no_pins); } else { if (region_no_pins_gen_num > 0) { region_no_pins_gen_num--; region_no_pins = generation_start_segment (generation_of (region_no_pins_gen_num)); } else break; } } while (region_no_pins); } void gc_heap::grow_mark_list_piece() { if (g_mark_list_piece_size < region_count) { delete[] g_mark_list_piece; // at least double the size size_t alloc_count = max ((g_mark_list_piece_size * 2), region_count); // we need two arrays with alloc_count entries per heap g_mark_list_piece = new (nothrow) uint8_t * *[alloc_count * 2 * get_num_heaps()]; if (g_mark_list_piece != nullptr) { g_mark_list_piece_size = alloc_count; } else { g_mark_list_piece_size = 0; } } } void gc_heap::save_current_survived() { if (!survived_per_region) return; size_t region_info_to_copy = region_count * sizeof (size_t); memcpy (old_card_survived_per_region, survived_per_region, region_info_to_copy); #ifdef _DEBUG for (size_t region_index = 0; region_index < region_count; region_index++) { if (survived_per_region[region_index] != 0) { dprintf (REGIONS_LOG, ("region#[%3d]: %Id", region_index, survived_per_region[region_index])); } } dprintf (REGIONS_LOG, ("global reported %Id", promoted_bytes (heap_number))); #endif //_DEBUG } void gc_heap::update_old_card_survived() { if (!survived_per_region) return; for (size_t region_index = 0; region_index < region_count; region_index++) { old_card_survived_per_region[region_index] = survived_per_region[region_index] - old_card_survived_per_region[region_index]; if 
(survived_per_region[region_index] != 0) { dprintf (REGIONS_LOG, ("region#[%3d]: %Id (card: %Id)", region_index, survived_per_region[region_index], old_card_survived_per_region[region_index])); } } } void gc_heap::update_planned_gen0_free_space (size_t free_size, uint8_t* plug) { gen0_pinned_free_space += free_size; if (!gen0_large_chunk_found) { gen0_large_chunk_found = (free_size >= END_SPACE_AFTER_GC_FL); if (gen0_large_chunk_found) { dprintf (3, ("h%d found large pin free space: %Id at %Ix", heap_number, free_size, plug)); } } } // REGIONS TODO: I wrote this in the same spirit as ephemeral_gen_fit_p but we really should // take committed into consideration instead of reserved. We could also avoid going through // the regions again and do this update in plan phase. void gc_heap::get_gen0_end_plan_space() { for (int gen_idx = settings.condemned_generation; gen_idx >= 0; gen_idx--) { generation* gen = generation_of (gen_idx); heap_segment* region = heap_segment_rw (generation_start_segment (gen)); while (region) { if (heap_segment_plan_gen_num (region) == 0) { size_t end_plan_space = heap_segment_reserved (region) - heap_segment_plan_allocated (region); if (!gen0_large_chunk_found) { gen0_large_chunk_found = (end_plan_space >= END_SPACE_AFTER_GC_FL); if (gen0_large_chunk_found) { dprintf (REGIONS_LOG, ("h%d found large end space: %Id in region %Ix", heap_number, end_plan_space, heap_segment_mem (region))); } } dprintf (REGIONS_LOG, ("h%d found end space: %Id in region %Ix, total %Id->%Id", heap_number, end_plan_space, heap_segment_mem (region), end_gen0_region_space, (end_gen0_region_space + end_plan_space))); end_gen0_region_space += end_plan_space; } region = heap_segment_next (region); } } } size_t gc_heap::get_gen0_end_space() { size_t end_space = 0; heap_segment* seg = generation_start_segment (generation_of (0)); while (seg) { // TODO - // This method can also be called concurrently by full GC notification but // there's no synchronization between checking for ephemeral_heap_segment and // getting alloc_allocated so for now we just always use heap_segment_allocated. //uint8_t* allocated = ((seg == ephemeral_heap_segment) ? 
// alloc_allocated : heap_segment_allocated (seg)); uint8_t* allocated = heap_segment_allocated (seg); end_space += heap_segment_reserved (seg) - allocated; dprintf (REGIONS_LOG, ("h%d gen0 seg %Ix, end %Ix-%Ix=%Ix, end_space->%Id", heap_number, heap_segment_mem (seg), heap_segment_reserved (seg), allocated, (heap_segment_reserved (seg) - allocated), end_space)); seg = heap_segment_next (seg); } return end_space; } #endif //USE_REGIONS inline uint8_t* gc_heap::find_next_marked (uint8_t* x, uint8_t* end, BOOL use_mark_list, uint8_t**& mark_list_next, uint8_t** mark_list_index) { if (use_mark_list) { uint8_t* old_x = x; while ((mark_list_next < mark_list_index) && (*mark_list_next <= x)) { mark_list_next++; } x = end; if ((mark_list_next < mark_list_index) #ifdef MULTIPLE_HEAPS && (*mark_list_next < end) //for multiple segments #endif //MULTIPLE_HEAPS ) x = *mark_list_next; #ifdef BACKGROUND_GC if (current_c_gc_state == c_gc_state_marking) { assert(gc_heap::background_running_p()); bgc_clear_batch_mark_array_bits (old_x, x); } #endif //BACKGROUND_GC } else { uint8_t* xl = x; #ifdef BACKGROUND_GC if (current_c_gc_state == c_gc_state_marking) { assert (gc_heap::background_running_p()); while ((xl < end) && !marked (xl)) { dprintf (4, ("-%Ix-", (size_t)xl)); assert ((size (xl) > 0)); background_object_marked (xl, TRUE); xl = xl + Align (size (xl)); Prefetch (xl); } } else #endif //BACKGROUND_GC { while ((xl < end) && !marked (xl)) { dprintf (4, ("-%Ix-", (size_t)xl)); assert ((size (xl) > 0)); xl = xl + Align (size (xl)); Prefetch (xl); } } assert (xl <= end); x = xl; } return x; } #ifdef FEATURE_EVENT_TRACE void gc_heap::init_bucket_info() { memset (bucket_info, 0, sizeof (bucket_info)); } void gc_heap::add_plug_in_condemned_info (generation* gen, size_t plug_size) { uint32_t bucket_index = generation_allocator (gen)->first_suitable_bucket (plug_size); (bucket_info[bucket_index].count)++; bucket_info[bucket_index].size += plug_size; } #endif //FEATURE_EVENT_TRACE #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable:21000) // Suppress PREFast warning about overly large function #endif //_PREFAST_ void gc_heap::plan_phase (int condemned_gen_number) { size_t old_gen2_allocated = 0; size_t old_gen2_size = 0; if (condemned_gen_number == (max_generation - 1)) { old_gen2_allocated = generation_free_list_allocated (generation_of (max_generation)); old_gen2_size = generation_size (max_generation); } assert (settings.concurrent == FALSE); dprintf (2,(ThreadStressLog::gcStartPlanMsg(), heap_number, condemned_gen_number, settings.promotion ? 
1 : 0)); generation* condemned_gen1 = generation_of (condemned_gen_number); BOOL use_mark_list = FALSE; #ifdef GC_CONFIG_DRIVEN dprintf (3, ("total number of marked objects: %Id (%Id)", (mark_list_index - &mark_list[0]), (mark_list_end - &mark_list[0]))); if (mark_list_index >= (mark_list_end + 1)) { mark_list_index = mark_list_end + 1; #ifndef MULTIPLE_HEAPS // in Server GC, we check for mark list overflow in sort_mark_list mark_list_overflow = true; #endif } #else //GC_CONFIG_DRIVEN dprintf (3, ("mark_list length: %Id", (mark_list_index - &mark_list[0]))); #endif //GC_CONFIG_DRIVEN if ((condemned_gen_number < max_generation) && (mark_list_index <= mark_list_end)) { #ifndef MULTIPLE_HEAPS #ifdef USE_VXSORT do_vxsort (mark_list, mark_list_index - mark_list, slow, shigh); #else //USE_VXSORT _sort (&mark_list[0], mark_list_index - 1, 0); #endif //USE_VXSORT dprintf (3, ("using mark list at GC #%Id", (size_t)settings.gc_index)); //verify_qsort_array (&mark_list[0], mark_list_index-1); #endif //!MULTIPLE_HEAPS use_mark_list = TRUE; get_gc_data_per_heap()->set_mechanism_bit(gc_mark_list_bit); } else { dprintf (3, ("mark_list not used")); } #ifdef FEATURE_BASICFREEZE #ifdef USE_REGIONS assert (!ro_segments_in_range); #else //USE_REGIONS if ((generation_start_segment (condemned_gen1) != ephemeral_heap_segment) && ro_segments_in_range) { sweep_ro_segments (generation_start_segment (condemned_gen1)); } #endif //USE_REGIONS #endif // FEATURE_BASICFREEZE #ifndef MULTIPLE_HEAPS int condemned_gen_index = get_stop_generation_index (condemned_gen_number); for (; condemned_gen_index <= condemned_gen_number; condemned_gen_index++) { generation* current_gen = generation_of (condemned_gen_index); if (shigh != (uint8_t*)0) { heap_segment* seg = heap_segment_rw (generation_start_segment (current_gen)); PREFIX_ASSUME(seg != NULL); heap_segment* fseg = seg; do { if (in_range_for_segment (slow, seg)) { uint8_t* start_unmarked = 0; #ifdef USE_REGIONS start_unmarked = heap_segment_mem (seg); #else //USE_REGIONS if (seg == fseg) { uint8_t* o = generation_allocation_start (current_gen); o += get_soh_start_obj_len (o); if (slow > o) { start_unmarked = o; assert ((slow - o) >= (int)Align (min_obj_size)); } } else { assert (condemned_gen_number == max_generation); start_unmarked = heap_segment_mem (seg); } #endif //USE_REGIONS if (start_unmarked) { size_t unmarked_size = slow - start_unmarked; if (unmarked_size > 0) { #ifdef BACKGROUND_GC if (current_c_gc_state == c_gc_state_marking) { bgc_clear_batch_mark_array_bits (start_unmarked, slow); } #endif //BACKGROUND_GC make_unused_array (start_unmarked, unmarked_size); } } } if (in_range_for_segment (shigh, seg)) { #ifdef BACKGROUND_GC if (current_c_gc_state == c_gc_state_marking) { bgc_clear_batch_mark_array_bits ((shigh + Align (size (shigh))), heap_segment_allocated (seg)); } #endif //BACKGROUND_GC heap_segment_saved_allocated (seg) = heap_segment_allocated (seg); heap_segment_allocated (seg) = shigh + Align (size (shigh)); } // test if the segment is in the range of [slow, shigh] if (!((heap_segment_reserved (seg) >= slow) && (heap_segment_mem (seg) <= shigh))) { heap_segment_saved_allocated (seg) = heap_segment_allocated (seg); // shorten it to minimum heap_segment_allocated (seg) = heap_segment_mem (seg); } seg = heap_segment_next_rw (seg); } while (seg); } else { heap_segment* seg = heap_segment_rw (generation_start_segment (current_gen)); PREFIX_ASSUME(seg != NULL); heap_segment* sseg = seg; do { uint8_t* start_unmarked = heap_segment_mem (seg); #ifndef USE_REGIONS 
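// Without regions the generation start objects live inside the segments, so for the
// segment holding the start object we only reset the space past that object.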
// shorten it to minimum if (seg == sseg) { // no survivors make all generations look empty uint8_t* o = generation_allocation_start (current_gen); o += get_soh_start_obj_len (o); start_unmarked = o; } #endif //!USE_REGIONS #ifdef BACKGROUND_GC if (current_c_gc_state == c_gc_state_marking) { bgc_clear_batch_mark_array_bits (start_unmarked, heap_segment_allocated (seg)); } #endif //BACKGROUND_GC heap_segment_saved_allocated (seg) = heap_segment_allocated (seg); heap_segment_allocated (seg) = start_unmarked; seg = heap_segment_next_rw (seg); } while (seg); } } #endif //MULTIPLE_HEAPS heap_segment* seg1 = heap_segment_rw (generation_start_segment (condemned_gen1)); PREFIX_ASSUME(seg1 != NULL); uint8_t* end = heap_segment_allocated (seg1); uint8_t* first_condemned_address = get_soh_start_object (seg1, condemned_gen1); uint8_t* x = first_condemned_address; #ifdef USE_REGIONS memset (regions_per_gen, 0, sizeof (regions_per_gen)); memset (sip_maxgen_regions_per_gen, 0, sizeof (sip_maxgen_regions_per_gen)); memset (reserved_free_regions_sip, 0, sizeof (reserved_free_regions_sip)); int pinned_survived_region = 0; uint8_t** mark_list_index = nullptr; uint8_t** mark_list_next = nullptr; if (use_mark_list) mark_list_next = get_region_mark_list (x, end, &mark_list_index); #else // USE_REGIONS assert (!marked (x)); uint8_t** mark_list_next = &mark_list[0]; #endif //USE_REGIONS uint8_t* plug_end = x; uint8_t* tree = 0; size_t sequence_number = 0; uint8_t* last_node = 0; size_t current_brick = brick_of (x); BOOL allocate_in_condemned = ((condemned_gen_number == max_generation)|| (settings.promotion == FALSE)); int active_old_gen_number = condemned_gen_number; int active_new_gen_number = (allocate_in_condemned ? condemned_gen_number: (1 + condemned_gen_number)); generation* older_gen = 0; generation* consing_gen = condemned_gen1; alloc_list r_free_list [MAX_SOH_BUCKET_COUNT]; size_t r_free_list_space = 0; size_t r_free_obj_space = 0; size_t r_older_gen_free_list_allocated = 0; size_t r_older_gen_condemned_allocated = 0; size_t r_older_gen_end_seg_allocated = 0; uint8_t* r_allocation_pointer = 0; uint8_t* r_allocation_limit = 0; uint8_t* r_allocation_start_region = 0; heap_segment* r_allocation_segment = 0; #ifdef FREE_USAGE_STATS size_t r_older_gen_free_space[NUM_GEN_POWER2]; #endif //FREE_USAGE_STATS if ((condemned_gen_number < max_generation)) { older_gen = generation_of (min (max_generation, 1 + condemned_gen_number)); generation_allocator (older_gen)->copy_to_alloc_list (r_free_list); r_free_list_space = generation_free_list_space (older_gen); r_free_obj_space = generation_free_obj_space (older_gen); #ifdef FREE_USAGE_STATS memcpy (r_older_gen_free_space, older_gen->gen_free_spaces, sizeof (r_older_gen_free_space)); #endif //FREE_USAGE_STATS generation_allocate_end_seg_p (older_gen) = FALSE; #ifdef DOUBLY_LINKED_FL if (older_gen->gen_num == max_generation) { generation_set_bgc_mark_bit_p (older_gen) = FALSE; generation_last_free_list_allocated (older_gen) = 0; } #endif //DOUBLY_LINKED_FL r_older_gen_free_list_allocated = generation_free_list_allocated (older_gen); r_older_gen_condemned_allocated = generation_condemned_allocated (older_gen); r_older_gen_end_seg_allocated = generation_end_seg_allocated (older_gen); r_allocation_limit = generation_allocation_limit (older_gen); r_allocation_pointer = generation_allocation_pointer (older_gen); r_allocation_start_region = generation_allocation_context_start_region (older_gen); r_allocation_segment = generation_allocation_segment (older_gen); #ifdef 
USE_REGIONS if (older_gen->gen_num == max_generation) { check_seg_gen_num (r_allocation_segment); } #endif //USE_REGIONS heap_segment* start_seg = heap_segment_rw (generation_start_segment (older_gen)); PREFIX_ASSUME(start_seg != NULL); #ifdef USE_REGIONS heap_segment* skip_seg = 0; assert (generation_allocation_pointer (older_gen) == 0); assert (generation_allocation_limit (older_gen) == 0); #else //USE_REGIONS heap_segment* skip_seg = ephemeral_heap_segment; if (start_seg != ephemeral_heap_segment) { assert (condemned_gen_number == (max_generation - 1)); } #endif //USE_REGIONS if (start_seg != skip_seg) { while (start_seg && (start_seg != skip_seg)) { assert (heap_segment_allocated (start_seg) >= heap_segment_mem (start_seg)); assert (heap_segment_allocated (start_seg) <= heap_segment_reserved (start_seg)); heap_segment_plan_allocated (start_seg) = heap_segment_allocated (start_seg); start_seg = heap_segment_next_rw (start_seg); } } } //reset all of the segment's plan_allocated { int condemned_gen_index1 = get_stop_generation_index (condemned_gen_number); for (; condemned_gen_index1 <= condemned_gen_number; condemned_gen_index1++) { generation* current_gen = generation_of (condemned_gen_index1); heap_segment* seg2 = heap_segment_rw (generation_start_segment (current_gen)); PREFIX_ASSUME(seg2 != NULL); while (seg2) { #ifdef USE_REGIONS regions_per_gen[condemned_gen_index1]++; dprintf (REGIONS_LOG, ("h%d gen%d %Ix-%Ix", heap_number, condemned_gen_index1, heap_segment_mem (seg2), heap_segment_allocated (seg2))); #endif //USE_REGIONS heap_segment_plan_allocated (seg2) = heap_segment_mem (seg2); seg2 = heap_segment_next_rw (seg2); } } } int condemned_gn = condemned_gen_number; int bottom_gen = 0; init_free_and_plug(); while (condemned_gn >= bottom_gen) { generation* condemned_gen2 = generation_of (condemned_gn); generation_allocator (condemned_gen2)->clear(); generation_free_list_space (condemned_gen2) = 0; generation_free_obj_space (condemned_gen2) = 0; generation_allocation_size (condemned_gen2) = 0; generation_condemned_allocated (condemned_gen2) = 0; generation_sweep_allocated (condemned_gen2) = 0; generation_pinned_allocated (condemned_gen2) = 0; generation_free_list_allocated(condemned_gen2) = 0; generation_end_seg_allocated (condemned_gen2) = 0; generation_pinned_allocation_sweep_size (condemned_gen2) = 0; generation_pinned_allocation_compact_size (condemned_gen2) = 0; #ifdef FREE_USAGE_STATS generation_pinned_free_obj_space (condemned_gen2) = 0; generation_allocated_in_pinned_free (condemned_gen2) = 0; generation_allocated_since_last_pin (condemned_gen2) = 0; #endif //FREE_USAGE_STATS #ifndef USE_REGIONS generation_plan_allocation_start (condemned_gen2) = 0; #endif //!USE_REGIONS generation_allocation_segment (condemned_gen2) = heap_segment_rw (generation_start_segment (condemned_gen2)); PREFIX_ASSUME(generation_allocation_segment(condemned_gen2) != NULL); #ifdef USE_REGIONS generation_allocation_pointer (condemned_gen2) = heap_segment_mem (generation_allocation_segment (condemned_gen2)); #else //USE_REGIONS if (generation_start_segment (condemned_gen2) != ephemeral_heap_segment) { generation_allocation_pointer (condemned_gen2) = heap_segment_mem (generation_allocation_segment (condemned_gen2)); } else { generation_allocation_pointer (condemned_gen2) = generation_allocation_start (condemned_gen2); } #endif //USE_REGIONS generation_allocation_limit (condemned_gen2) = generation_allocation_pointer (condemned_gen2); generation_allocation_context_start_region (condemned_gen2) = 
generation_allocation_pointer (condemned_gen2); condemned_gn--; } BOOL allocate_first_generation_start = FALSE; if (allocate_in_condemned) { allocate_first_generation_start = TRUE; } dprintf(3,( " From %Ix to %Ix", (size_t)x, (size_t)end)); #ifdef USE_REGIONS if (should_sweep_in_plan (seg1)) { sweep_region_in_plan (seg1, use_mark_list, mark_list_next, mark_list_index); x = end; } #else demotion_low = MAX_PTR; demotion_high = heap_segment_allocated (ephemeral_heap_segment); #endif //!USE_REGIONS // If we are doing a gen1 only because of cards, it means we should not demote any pinned plugs // from gen1. They should get promoted to gen2. demote_gen1_p = !(settings.promotion && (settings.condemned_generation == (max_generation - 1)) && gen_to_condemn_reasons.is_only_condition (gen_low_card_p)); total_ephemeral_size = 0; print_free_and_plug ("BP"); #ifndef USE_REGIONS for (int gen_idx = 0; gen_idx <= max_generation; gen_idx++) { generation* temp_gen = generation_of (gen_idx); dprintf (2, ("gen%d start %Ix, plan start %Ix", gen_idx, generation_allocation_start (temp_gen), generation_plan_allocation_start (temp_gen))); } #endif //!USE_REGIONS #ifdef FEATURE_EVENT_TRACE // When verbose level is enabled we want to record some info about gen2 FL usage during gen1 GCs. // We record the bucket info for the largest FL items and plugs that we have to allocate in condemned. bool record_fl_info_p = (EVENT_ENABLED (GCFitBucketInfo) && (condemned_gen_number == (max_generation - 1))); size_t recorded_fl_info_size = 0; if (record_fl_info_p) init_bucket_info(); bool fire_pinned_plug_events_p = EVENT_ENABLED(PinPlugAtGCTime); #endif //FEATURE_EVENT_TRACE size_t last_plug_len = 0; #ifdef DOUBLY_LINKED_FL gen2_removed_no_undo = 0; saved_pinned_plug_index = INVALID_SAVED_PINNED_PLUG_INDEX; #endif //DOUBLY_LINKED_FL while (1) { if (x >= end) { if (!use_mark_list) { assert (x == end); } #ifdef USE_REGIONS if (heap_segment_swept_in_plan (seg1)) { assert (heap_segment_gen_num (seg1) == active_old_gen_number); dynamic_data* dd_active_old = dynamic_data_of (active_old_gen_number); dd_survived_size (dd_active_old) += heap_segment_survived (seg1); dprintf (REGIONS_LOG, ("region %Ix-%Ix SIP", heap_segment_mem (seg1), heap_segment_allocated (seg1))); } else #endif //USE_REGIONS { assert (heap_segment_allocated (seg1) == end); heap_segment_saved_allocated (seg1) = heap_segment_allocated (seg1); heap_segment_allocated (seg1) = plug_end; current_brick = update_brick_table (tree, current_brick, x, plug_end); dprintf (REGIONS_LOG, ("region %Ix-%Ix(%Ix) non SIP", heap_segment_mem (seg1), heap_segment_allocated (seg1), heap_segment_plan_allocated (seg1))); dprintf (3, ("end of seg: new tree, sequence# 0")); sequence_number = 0; tree = 0; } #ifdef USE_REGIONS heap_segment_pinned_survived (seg1) = pinned_survived_region; dprintf (REGIONS_LOG, ("h%d setting seg %Ix pin surv: %Ix", heap_number, heap_segment_mem (seg1), pinned_survived_region)); pinned_survived_region = 0; if (heap_segment_mem (seg1) == heap_segment_allocated (seg1)) { num_regions_freed_in_sweep++; } #endif //USE_REGIONS if (heap_segment_next_rw (seg1)) { seg1 = heap_segment_next_rw (seg1); end = heap_segment_allocated (seg1); plug_end = x = heap_segment_mem (seg1); current_brick = brick_of (x); #ifdef USE_REGIONS if (use_mark_list) mark_list_next = get_region_mark_list (x, end, &mark_list_index); if (should_sweep_in_plan (seg1)) { sweep_region_in_plan (seg1, use_mark_list, mark_list_next, mark_list_index); x = end; } #endif //USE_REGIONS dprintf(3,( " From %Ix to 
%Ix", (size_t)x, (size_t)end)); continue; } else { #ifdef USE_REGIONS // We have a few task here when we ran out of regions to go through for the // active_old_gen_number - // // + decide on which pins to skip // + set the planned gen for the regions we process here // + set the consing gen's alloc ptr/limit // + decide on the new active_old_gen_number (which is just the current one - 1) // + decide on the new active_new_gen_number (which depends on settings.promotion) // // Important differences between process_last_np_surv_region and process_ephemeral_boundaries // - it's guaranteed we would ask to allocate gen1 start for promotion and gen0 // start for non promotion case. // - consing_gen is never changed. In fact we really don't need consing_gen, we just // need the alloc ptr/limit pair and the alloc seg. // TODO : should just get rid of consing_gen. // These make things more regular and easier to keep track of. // // Also I'm doing everything here instead of having to have separate code to go // through the left over pins after the main loop in plan phase. int saved_active_new_gen_number = active_new_gen_number; BOOL saved_allocate_in_condemned = allocate_in_condemned; dprintf (REGIONS_LOG, ("h%d switching to look at next gen - current active old %d, new %d, alloc_in_condemned: %d", heap_number, active_old_gen_number, active_new_gen_number, allocate_in_condemned)); if (active_old_gen_number <= (settings.promotion ? (max_generation - 1) : max_generation)) { dprintf (REGIONS_LOG, ("h%d active old: %d, new: %d->%d, allocate_in_condemned %d->1", heap_number, active_old_gen_number, active_new_gen_number, (active_new_gen_number - 1), allocate_in_condemned)); active_new_gen_number--; allocate_in_condemned = TRUE; } if (active_new_gen_number >= 0) { process_last_np_surv_region (consing_gen, saved_active_new_gen_number, active_new_gen_number); } if (active_old_gen_number == 0) { // We need to process the pins on the remaining regions if any. process_remaining_regions (active_new_gen_number, consing_gen); break; } else { active_old_gen_number--; seg1 = heap_segment_rw (generation_start_segment (generation_of (active_old_gen_number))); end = heap_segment_allocated (seg1); plug_end = x = heap_segment_mem (seg1); current_brick = brick_of (x); if (use_mark_list) mark_list_next = get_region_mark_list (x, end, &mark_list_index); if (should_sweep_in_plan (seg1)) { sweep_region_in_plan (seg1, use_mark_list, mark_list_next, mark_list_index); x = end; } dprintf (REGIONS_LOG,("h%d switching to gen%d start region %Ix, %Ix-%Ix", heap_number, active_old_gen_number, heap_segment_mem (seg1), x, end)); continue; } #else //USE_REGIONS break; #endif //USE_REGIONS } } BOOL last_npinned_plug_p = FALSE; BOOL last_pinned_plug_p = FALSE; // last_pinned_plug is the beginning of the last pinned plug. If we merge a plug into a pinned // plug we do not change the value of last_pinned_plug. This happens with artificially pinned plugs - // it can be merged with a previous pinned plug and a pinned plug after it can be merged with it. 
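// The loop below forms one plug per iteration: a maximal run of adjacent marked objects
// with the same pinned state. It records the gap in front of the plug and then decides
// where the plug is going, either into the older generation, into the condemned space,
// or left in place if it is (or becomes) pinned.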
uint8_t* last_pinned_plug = 0; size_t num_pinned_plugs_in_plug = 0; uint8_t* last_object_in_plug = 0; while ((x < end) && marked (x)) { uint8_t* plug_start = x; uint8_t* saved_plug_end = plug_end; BOOL pinned_plug_p = FALSE; BOOL npin_before_pin_p = FALSE; BOOL saved_last_npinned_plug_p = last_npinned_plug_p; uint8_t* saved_last_object_in_plug = last_object_in_plug; BOOL merge_with_last_pin_p = FALSE; size_t added_pinning_size = 0; size_t artificial_pinned_size = 0; store_plug_gap_info (plug_start, plug_end, last_npinned_plug_p, last_pinned_plug_p, last_pinned_plug, pinned_plug_p, last_object_in_plug, merge_with_last_pin_p, last_plug_len); #ifdef FEATURE_STRUCTALIGN int requiredAlignment = ((CObjectHeader*)plug_start)->GetRequiredAlignment(); size_t alignmentOffset = OBJECT_ALIGNMENT_OFFSET; #endif // FEATURE_STRUCTALIGN { uint8_t* xl = x; while ((xl < end) && marked (xl) && (pinned (xl) == pinned_plug_p)) { assert (xl < end); if (pinned(xl)) { clear_pinned (xl); } #ifdef FEATURE_STRUCTALIGN else { int obj_requiredAlignment = ((CObjectHeader*)xl)->GetRequiredAlignment(); if (obj_requiredAlignment > requiredAlignment) { requiredAlignment = obj_requiredAlignment; alignmentOffset = xl - plug_start + OBJECT_ALIGNMENT_OFFSET; } } #endif // FEATURE_STRUCTALIGN clear_marked (xl); dprintf(4, ("+%Ix+", (size_t)xl)); assert ((size (xl) > 0)); assert ((size (xl) <= loh_size_threshold)); last_object_in_plug = xl; xl = xl + Align (size (xl)); Prefetch (xl); } BOOL next_object_marked_p = ((xl < end) && marked (xl)); if (pinned_plug_p) { // If it is pinned we need to extend to the next marked object as we can't use part of // a pinned object to make the artificial gap (unless the last 3 ptr sized words are all // references but for now I am just using the next non pinned object for that). if (next_object_marked_p) { clear_marked (xl); last_object_in_plug = xl; size_t extra_size = Align (size (xl)); xl = xl + extra_size; added_pinning_size = extra_size; } } else { if (next_object_marked_p) npin_before_pin_p = TRUE; } assert (xl <= end); x = xl; } dprintf (3, ( "%Ix[", (size_t)plug_start)); plug_end = x; size_t ps = plug_end - plug_start; last_plug_len = ps; dprintf (3, ( "%Ix[(%Ix)", (size_t)x, ps)); uint8_t* new_address = 0; if (!pinned_plug_p) { if (allocate_in_condemned && (settings.condemned_generation == max_generation) && (ps > OS_PAGE_SIZE)) { ptrdiff_t reloc = plug_start - generation_allocation_pointer (consing_gen); //reloc should >=0 except when we relocate //across segments and the dest seg is higher then the src if ((ps > (8*OS_PAGE_SIZE)) && (reloc > 0) && ((size_t)reloc < (ps/16))) { dprintf (3, ("Pinning %Ix; reloc would have been: %Ix", (size_t)plug_start, reloc)); // The last plug couldn't have been a npinned plug or it would have // included this plug. 
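// A large plug that would only move a short distance (less than 1/16th of its size,
// e.g. under 64KB for a 1MB plug) is artificially pinned here: copying it is not worth
// the little space it would reclaim, so it is treated like a pinned plug from now on.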
assert (!saved_last_npinned_plug_p); if (last_pinned_plug) { dprintf (3, ("artificially pinned plug merged with last pinned plug")); merge_with_last_pin_p = TRUE; } else { enque_pinned_plug (plug_start, FALSE, 0); last_pinned_plug = plug_start; } convert_to_pinned_plug (last_npinned_plug_p, last_pinned_plug_p, pinned_plug_p, ps, artificial_pinned_size); } } } #ifndef USE_REGIONS if (allocate_first_generation_start) { allocate_first_generation_start = FALSE; plan_generation_start (condemned_gen1, consing_gen, plug_start); assert (generation_plan_allocation_start (condemned_gen1)); } if (seg1 == ephemeral_heap_segment) { process_ephemeral_boundaries (plug_start, active_new_gen_number, active_old_gen_number, consing_gen, allocate_in_condemned); } #endif //!USE_REGIONS dprintf (3, ("adding %Id to gen%d surv", ps, active_old_gen_number)); dynamic_data* dd_active_old = dynamic_data_of (active_old_gen_number); dd_survived_size (dd_active_old) += ps; BOOL convert_to_pinned_p = FALSE; BOOL allocated_in_older_p = FALSE; if (!pinned_plug_p) { #if defined (RESPECT_LARGE_ALIGNMENT) || defined (FEATURE_STRUCTALIGN) dd_num_npinned_plugs (dd_active_old)++; #endif //RESPECT_LARGE_ALIGNMENT || FEATURE_STRUCTALIGN add_gen_plug (active_old_gen_number, ps); if (allocate_in_condemned) { verify_pins_with_post_plug_info("before aic"); new_address = allocate_in_condemned_generations (consing_gen, ps, active_old_gen_number, #ifdef SHORT_PLUGS &convert_to_pinned_p, (npin_before_pin_p ? plug_end : 0), seg1, #endif //SHORT_PLUGS plug_start REQD_ALIGN_AND_OFFSET_ARG); verify_pins_with_post_plug_info("after aic"); } else { new_address = allocate_in_older_generation (older_gen, ps, active_old_gen_number, plug_start REQD_ALIGN_AND_OFFSET_ARG); if (new_address != 0) { allocated_in_older_p = TRUE; if (settings.condemned_generation == (max_generation - 1)) { dprintf (3, (" NA: %Ix-%Ix -> %Ix, %Ix (%Ix)", plug_start, plug_end, (size_t)new_address, (size_t)new_address + (plug_end - plug_start), (size_t)(plug_end - plug_start))); } } else { if (generation_allocator(older_gen)->discard_if_no_fit_p()) { allocate_in_condemned = TRUE; } new_address = allocate_in_condemned_generations (consing_gen, ps, active_old_gen_number, #ifdef SHORT_PLUGS &convert_to_pinned_p, (npin_before_pin_p ? plug_end : 0), seg1, #endif //SHORT_PLUGS plug_start REQD_ALIGN_AND_OFFSET_ARG); } } #ifdef FEATURE_EVENT_TRACE if (record_fl_info_p && !allocated_in_older_p) { add_plug_in_condemned_info (older_gen, ps); recorded_fl_info_size += ps; } #endif //FEATURE_EVENT_TRACE if (convert_to_pinned_p) { assert (last_npinned_plug_p != FALSE); assert (last_pinned_plug_p == FALSE); convert_to_pinned_plug (last_npinned_plug_p, last_pinned_plug_p, pinned_plug_p, ps, artificial_pinned_size); enque_pinned_plug (plug_start, FALSE, 0); last_pinned_plug = plug_start; } else { if (!new_address) { //verify that we are at then end of the ephemeral segment assert (generation_allocation_segment (consing_gen) == ephemeral_heap_segment); //verify that we are near the end assert ((generation_allocation_pointer (consing_gen) + Align (ps)) < heap_segment_allocated (ephemeral_heap_segment)); assert ((generation_allocation_pointer (consing_gen) + Align (ps)) > (heap_segment_allocated (ephemeral_heap_segment) + Align (min_obj_size))); } else { dprintf (3, (ThreadStressLog::gcPlanPlugMsg(), (size_t)(node_gap_size (plug_start)), plug_start, plug_end, (size_t)new_address, (size_t)(plug_start - new_address), (size_t)new_address + ps, ps, (is_plug_padded (plug_start) ? 
1 : 0), x, (allocated_in_older_p ? "O" : "C"))); #ifdef SHORT_PLUGS if (is_plug_padded (plug_start)) { dprintf (3, ("%Ix was padded", plug_start)); dd_padding_size (dd_active_old) += Align (min_obj_size); } #endif //SHORT_PLUGS } } } if (pinned_plug_p) { #ifdef FEATURE_EVENT_TRACE if (fire_pinned_plug_events_p) { FIRE_EVENT(PinPlugAtGCTime, plug_start, plug_end, (merge_with_last_pin_p ? 0 : (uint8_t*)node_gap_size (plug_start))); } #endif //FEATURE_EVENT_TRACE if (merge_with_last_pin_p) { merge_with_last_pinned_plug (last_pinned_plug, ps); } else { assert (last_pinned_plug == plug_start); set_pinned_info (plug_start, ps, consing_gen); } new_address = plug_start; dprintf (3, (ThreadStressLog::gcPlanPinnedPlugMsg(), (size_t)(node_gap_size (plug_start)), (size_t)plug_start, (size_t)plug_end, ps, (merge_with_last_pin_p ? 1 : 0))); dprintf (3, ("adding %Id to gen%d pinned surv", plug_end - plug_start, active_old_gen_number)); size_t pinned_plug_size = plug_end - plug_start; #ifdef USE_REGIONS pinned_survived_region += (int)pinned_plug_size; #endif //USE_REGIONS dd_pinned_survived_size (dd_active_old) += pinned_plug_size; dd_added_pinned_size (dd_active_old) += added_pinning_size; dd_artificial_pinned_survived_size (dd_active_old) += artificial_pinned_size; if (!demote_gen1_p && (active_old_gen_number == (max_generation - 1))) { last_gen1_pin_end = plug_end; } } #ifdef _DEBUG // detect forward allocation in the same segment assert (!((new_address > plug_start) && (new_address < heap_segment_reserved (seg1)))); #endif //_DEBUG if (!merge_with_last_pin_p) { if (current_brick != brick_of (plug_start)) { current_brick = update_brick_table (tree, current_brick, plug_start, saved_plug_end); sequence_number = 0; tree = 0; } set_node_relocation_distance (plug_start, (new_address - plug_start)); if (last_node && (node_relocation_distance (last_node) == (node_relocation_distance (plug_start) + (ptrdiff_t)node_gap_size (plug_start)))) { //dprintf(3,( " Lb")); dprintf (3, ("%Ix Lb", plug_start)); set_node_left (plug_start); } if (0 == sequence_number) { dprintf (2, ("sn: 0, tree is set to %Ix", plug_start)); tree = plug_start; } verify_pins_with_post_plug_info("before insert node"); tree = insert_node (plug_start, ++sequence_number, tree, last_node); dprintf (3, ("tree is %Ix (b: %Ix) after insert_node(lc: %Ix, rc: %Ix)", tree, brick_of (tree), (tree + node_left_child (tree)), (tree + node_right_child (tree)))); last_node = plug_start; #ifdef _DEBUG // If we detect if the last plug is pinned plug right before us, we should save this gap info if (!pinned_plug_p) { if (mark_stack_tos > 0) { mark& m = mark_stack_array[mark_stack_tos - 1]; if (m.has_post_plug_info()) { uint8_t* post_plug_info_start = m.saved_post_plug_info_start; size_t* current_plug_gap_start = (size_t*)(plug_start - sizeof (plug_and_gap)); if ((uint8_t*)current_plug_gap_start == post_plug_info_start) { dprintf (3, ("Ginfo: %Ix, %Ix, %Ix", *current_plug_gap_start, *(current_plug_gap_start + 1), *(current_plug_gap_start + 2))); memcpy (&(m.saved_post_plug_debug), current_plug_gap_start, sizeof (gap_reloc_pair)); } } } } #endif //_DEBUG verify_pins_with_post_plug_info("after insert node"); } } if (num_pinned_plugs_in_plug > 1) { dprintf (3, ("more than %Id pinned plugs in this plug", num_pinned_plugs_in_plug)); } x = find_next_marked (x, end, use_mark_list, mark_list_next, mark_list_index); } #ifndef USE_REGIONS while (!pinned_plug_que_empty_p()) { if (settings.promotion) { uint8_t* pplug = pinned_plug (oldest_pin()); if (in_range_for_segment 
(pplug, ephemeral_heap_segment)) { consing_gen = ensure_ephemeral_heap_segment (consing_gen); //allocate all of the generation gaps while (active_new_gen_number > 0) { active_new_gen_number--; if (active_new_gen_number == (max_generation - 1)) { maxgen_pinned_compact_before_advance = generation_pinned_allocation_compact_size (generation_of (max_generation)); if (!demote_gen1_p) advance_pins_for_demotion (consing_gen); } generation* gen = generation_of (active_new_gen_number); plan_generation_start (gen, consing_gen, 0); if (demotion_low == MAX_PTR) { demotion_low = pplug; dprintf (3, ("end plan: dlow->%Ix", demotion_low)); } dprintf (2, ("(%d)gen%d plan start: %Ix", heap_number, active_new_gen_number, (size_t)generation_plan_allocation_start (gen))); assert (generation_plan_allocation_start (gen)); } } } if (pinned_plug_que_empty_p()) break; size_t entry = deque_pinned_plug(); mark* m = pinned_plug_of (entry); uint8_t* plug = pinned_plug (m); size_t len = pinned_len (m); // detect pinned block in different segment (later) than // allocation segment heap_segment* nseg = heap_segment_rw (generation_allocation_segment (consing_gen)); while ((plug < generation_allocation_pointer (consing_gen)) || (plug >= heap_segment_allocated (nseg))) { assert ((plug < heap_segment_mem (nseg)) || (plug > heap_segment_reserved (nseg))); //adjust the end of the segment to be the end of the plug assert (generation_allocation_pointer (consing_gen)>= heap_segment_mem (nseg)); assert (generation_allocation_pointer (consing_gen)<= heap_segment_committed (nseg)); heap_segment_plan_allocated (nseg) = generation_allocation_pointer (consing_gen); //switch allocation segment nseg = heap_segment_next_rw (nseg); generation_allocation_segment (consing_gen) = nseg; //reset the allocation pointer and limits generation_allocation_pointer (consing_gen) = heap_segment_mem (nseg); } set_new_pin_info (m, generation_allocation_pointer (consing_gen)); dprintf (2, ("pin %Ix b: %Ix->%Ix", plug, brick_of (plug), (size_t)(brick_table[brick_of (plug)]))); generation_allocation_pointer (consing_gen) = plug + len; generation_allocation_limit (consing_gen) = generation_allocation_pointer (consing_gen); //Add the size of the pinned plug to the right pinned allocations //find out which gen this pinned plug came from int frgn = object_gennum (plug); if ((frgn != (int)max_generation) && settings.promotion) { generation_pinned_allocation_sweep_size ((generation_of (frgn +1))) += len; } } plan_generation_starts (consing_gen); #endif //!USE_REGIONS descr_generations ("AP"); print_free_and_plug ("AP"); { #ifdef SIMPLE_DPRINTF for (int gen_idx = 0; gen_idx <= max_generation; gen_idx++) { generation* temp_gen = generation_of (gen_idx); dynamic_data* temp_dd = dynamic_data_of (gen_idx); int added_pinning_ratio = 0; int artificial_pinned_ratio = 0; if (dd_pinned_survived_size (temp_dd) != 0) { added_pinning_ratio = (int)((float)dd_added_pinned_size (temp_dd) * 100 / (float)dd_pinned_survived_size (temp_dd)); artificial_pinned_ratio = (int)((float)dd_artificial_pinned_survived_size (temp_dd) * 100 / (float)dd_pinned_survived_size (temp_dd)); } size_t padding_size = #ifdef SHORT_PLUGS dd_padding_size (temp_dd); #else 0; #endif //SHORT_PLUGS dprintf (1, ("gen%d: NON PIN alloc: %Id, pin com: %Id, sweep: %Id, surv: %Id, pinsurv: %Id(%d%% added, %d%% art), np surv: %Id, pad: %Id", gen_idx, generation_allocation_size (temp_gen), generation_pinned_allocation_compact_size (temp_gen), generation_pinned_allocation_sweep_size (temp_gen), dd_survived_size 
(temp_dd), dd_pinned_survived_size (temp_dd), added_pinning_ratio, artificial_pinned_ratio, (dd_survived_size (temp_dd) - dd_pinned_survived_size (temp_dd)), padding_size)); #ifndef USE_REGIONS dprintf (1, ("gen%d: %Ix, %Ix(%Id)", gen_idx, generation_allocation_start (temp_gen), generation_plan_allocation_start (temp_gen), (size_t)(generation_plan_allocation_start (temp_gen) - generation_allocation_start (temp_gen)))); #endif //USE_REGIONS } #endif //SIMPLE_DPRINTF } if (settings.condemned_generation == (max_generation - 1 )) { generation* older_gen = generation_of (settings.condemned_generation + 1); size_t rejected_free_space = generation_free_obj_space (older_gen) - r_free_obj_space; size_t free_list_allocated = generation_free_list_allocated (older_gen) - r_older_gen_free_list_allocated; size_t end_seg_allocated = generation_end_seg_allocated (older_gen) - r_older_gen_end_seg_allocated; size_t condemned_allocated = generation_condemned_allocated (older_gen) - r_older_gen_condemned_allocated; size_t growth = end_seg_allocated + condemned_allocated; if (growth > 0) { dprintf (1, ("gen2 grew %Id (end seg alloc: %Id, condemned alloc: %Id", growth, end_seg_allocated, condemned_allocated)); maxgen_size_inc_p = true; } else { dprintf (2, ("gen2 didn't grow (end seg alloc: %Id, , condemned alloc: %Id, gen1 c alloc: %Id", end_seg_allocated, condemned_allocated, generation_condemned_allocated (generation_of (max_generation - 1)))); } dprintf (1, ("older gen's free alloc: %Id->%Id, seg alloc: %Id->%Id, condemned alloc: %Id->%Id", r_older_gen_free_list_allocated, generation_free_list_allocated (older_gen), r_older_gen_end_seg_allocated, generation_end_seg_allocated (older_gen), r_older_gen_condemned_allocated, generation_condemned_allocated (older_gen))); dprintf (1, ("this GC did %Id free list alloc(%Id bytes free space rejected)", free_list_allocated, rejected_free_space)); maxgen_size_increase* maxgen_size_info = &(get_gc_data_per_heap()->maxgen_size_info); maxgen_size_info->free_list_allocated = free_list_allocated; maxgen_size_info->free_list_rejected = rejected_free_space; maxgen_size_info->end_seg_allocated = end_seg_allocated; maxgen_size_info->condemned_allocated = condemned_allocated; maxgen_size_info->pinned_allocated = maxgen_pinned_compact_before_advance; maxgen_size_info->pinned_allocated_advance = generation_pinned_allocation_compact_size (generation_of (max_generation)) - maxgen_pinned_compact_before_advance; #ifdef FREE_USAGE_STATS int free_list_efficiency = 0; if ((free_list_allocated + rejected_free_space) != 0) free_list_efficiency = (int)(((float) (free_list_allocated) / (float)(free_list_allocated + rejected_free_space)) * (float)100); int running_free_list_efficiency = (int)(generation_allocator_efficiency(older_gen)*100); dprintf (1, ("gen%d free list alloc effi: %d%%, current effi: %d%%", older_gen->gen_num, free_list_efficiency, running_free_list_efficiency)); dprintf (1, ("gen2 free list change")); for (int j = 0; j < NUM_GEN_POWER2; j++) { dprintf (1, ("[h%d][#%Id]: 2^%d: F: %Id->%Id(%Id), P: %Id", heap_number, settings.gc_index, (j + 10), r_older_gen_free_space[j], older_gen->gen_free_spaces[j], (ptrdiff_t)(r_older_gen_free_space[j] - older_gen->gen_free_spaces[j]), (generation_of(max_generation - 1))->gen_plugs[j])); } #endif //FREE_USAGE_STATS } size_t fragmentation = generation_fragmentation (generation_of (condemned_gen_number), consing_gen, heap_segment_allocated (ephemeral_heap_segment)); dprintf (2,("Fragmentation: %Id", fragmentation)); dprintf (2,("---- End of 
Plan phase ----")); // We may update write barrier code. We assume here EE has been suspended if we are on a GC thread. assert(IsGCInProgress()); BOOL should_expand = FALSE; BOOL should_compact= FALSE; ephemeral_promotion = FALSE; #ifdef HOST_64BIT if ((!settings.concurrent) && #ifdef USE_REGIONS !special_sweep_p && #endif //USE_REGIONS !provisional_mode_triggered && ((condemned_gen_number < max_generation) && ((settings.gen0_reduction_count > 0) || (settings.entry_memory_load >= 95)))) { dprintf (GTC_LOG, ("gen0 reduction count is %d, condemning %d, mem load %d", settings.gen0_reduction_count, condemned_gen_number, settings.entry_memory_load)); should_compact = TRUE; get_gc_data_per_heap()->set_mechanism (gc_heap_compact, ((settings.gen0_reduction_count > 0) ? compact_fragmented_gen0 : compact_high_mem_load)); #ifndef USE_REGIONS if ((condemned_gen_number >= (max_generation - 1)) && dt_low_ephemeral_space_p (tuning_deciding_expansion)) { dprintf (GTC_LOG, ("Not enough space for all ephemeral generations with compaction")); should_expand = TRUE; } #endif //!USE_REGIONS } else #endif // HOST_64BIT { should_compact = decide_on_compacting (condemned_gen_number, fragmentation, should_expand); } #ifdef FEATURE_LOH_COMPACTION loh_compacted_p = FALSE; #endif //FEATURE_LOH_COMPACTION if (condemned_gen_number == max_generation) { #ifdef FEATURE_LOH_COMPACTION if (settings.loh_compaction) { if (plan_loh()) { should_compact = TRUE; get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_loh_forced); loh_compacted_p = TRUE; } } else { if ((heap_number == 0) && (loh_pinned_queue)) { loh_pinned_queue_decay--; if (!loh_pinned_queue_decay) { delete loh_pinned_queue; loh_pinned_queue = 0; } } } if (!loh_compacted_p) #endif //FEATURE_LOH_COMPACTION { GCToEEInterface::DiagWalkUOHSurvivors(__this, loh_generation); sweep_uoh_objects (loh_generation); } GCToEEInterface::DiagWalkUOHSurvivors(__this, poh_generation); sweep_uoh_objects (poh_generation); } else { settings.loh_compaction = FALSE; } #ifdef MULTIPLE_HEAPS new_heap_segment = NULL; if (should_compact && should_expand) gc_policy = policy_expand; else if (should_compact) gc_policy = policy_compact; else gc_policy = policy_sweep; //vote for result of should_compact dprintf (3, ("Joining for compaction decision")); gc_t_join.join(this, gc_join_decide_on_compaction); if (gc_t_join.joined()) { //safe place to delete large heap segments if (condemned_gen_number == max_generation) { for (int i = 0; i < n_heaps; i++) { g_heaps [i]->rearrange_uoh_segments (); } } if (maxgen_size_inc_p && provisional_mode_triggered #ifdef BACKGROUND_GC && !is_bgc_in_progress() #endif //BACKGROUND_GC ) { pm_trigger_full_gc = true; dprintf (GTC_LOG, ("in PM: maxgen size inc, doing a sweeping gen1 and trigger NGC2")); } else { #ifndef USE_REGIONS settings.demotion = FALSE; #endif //!USE_REGIONS int pol_max = policy_sweep; #ifdef GC_CONFIG_DRIVEN BOOL is_compaction_mandatory = FALSE; #endif //GC_CONFIG_DRIVEN int i; for (i = 0; i < n_heaps; i++) { if (pol_max < g_heaps[i]->gc_policy) pol_max = policy_compact; #ifndef USE_REGIONS // set the demotion flag is any of the heap has demotion if (g_heaps[i]->demotion_high >= g_heaps[i]->demotion_low) { (g_heaps[i]->get_gc_data_per_heap())->set_mechanism_bit (gc_demotion_bit); settings.demotion = TRUE; } #endif //!USE_REGIONS #ifdef GC_CONFIG_DRIVEN if (!is_compaction_mandatory) { int compact_reason = (g_heaps[i]->get_gc_data_per_heap())->get_mechanism (gc_heap_compact); if (compact_reason >= 0) { if 
(gc_heap_compact_reason_mandatory_p[compact_reason]) is_compaction_mandatory = TRUE; } } #endif //GC_CONFIG_DRIVEN } #ifdef GC_CONFIG_DRIVEN if (!is_compaction_mandatory) { // If compaction is not mandatory we can feel free to change it to a sweeping GC. // Note that we may want to change this to only checking every so often instead of every single GC. if (should_do_sweeping_gc (pol_max >= policy_compact)) { pol_max = policy_sweep; } else { if (pol_max == policy_sweep) pol_max = policy_compact; } } #endif //GC_CONFIG_DRIVEN for (i = 0; i < n_heaps; i++) { if (pol_max > g_heaps[i]->gc_policy) g_heaps[i]->gc_policy = pol_max; #ifndef USE_REGIONS //get the segment while we are serialized if (g_heaps[i]->gc_policy == policy_expand) { g_heaps[i]->new_heap_segment = g_heaps[i]->soh_get_segment_to_expand(); if (!g_heaps[i]->new_heap_segment) { set_expand_in_full_gc (condemned_gen_number); //we are out of memory, cancel the expansion g_heaps[i]->gc_policy = policy_compact; } } #endif //!USE_REGIONS } BOOL is_full_compacting_gc = FALSE; if ((gc_policy >= policy_compact) && (condemned_gen_number == max_generation)) { full_gc_counts[gc_type_compacting]++; is_full_compacting_gc = TRUE; } for (i = 0; i < n_heaps; i++) { //copy the card and brick tables if (g_gc_card_table!= g_heaps[i]->card_table) { g_heaps[i]->copy_brick_card_table(); } if (is_full_compacting_gc) { g_heaps[i]->loh_alloc_since_cg = 0; } } } #ifdef FEATURE_EVENT_TRACE if (informational_event_enabled_p) { gc_time_info[time_sweep] = GetHighPrecisionTimeStamp(); gc_time_info[time_plan] = gc_time_info[time_sweep] - gc_time_info[time_plan]; } #endif //FEATURE_EVENT_TRACE dprintf(3, ("Starting all gc threads after compaction decision")); gc_t_join.restart(); } should_compact = (gc_policy >= policy_compact); should_expand = (gc_policy >= policy_expand); #else //MULTIPLE_HEAPS //safe place to delete large heap segments if (condemned_gen_number == max_generation) { rearrange_uoh_segments (); } if (maxgen_size_inc_p && provisional_mode_triggered #ifdef BACKGROUND_GC && !is_bgc_in_progress() #endif //BACKGROUND_GC ) { pm_trigger_full_gc = true; dprintf (GTC_LOG, ("in PM: maxgen size inc, doing a sweeping gen1 and trigger NGC2")); } else { #ifndef USE_REGIONS // for regions it was already set when we set plan_gen_num for regions. settings.demotion = ((demotion_high >= demotion_low) ? 
TRUE : FALSE); if (settings.demotion) get_gc_data_per_heap()->set_mechanism_bit (gc_demotion_bit); #endif //!USE_REGIONS #ifdef GC_CONFIG_DRIVEN BOOL is_compaction_mandatory = FALSE; int compact_reason = get_gc_data_per_heap()->get_mechanism (gc_heap_compact); if (compact_reason >= 0) is_compaction_mandatory = gc_heap_compact_reason_mandatory_p[compact_reason]; if (!is_compaction_mandatory) { if (should_do_sweeping_gc (should_compact)) should_compact = FALSE; else should_compact = TRUE; } #endif //GC_CONFIG_DRIVEN if (should_compact && (condemned_gen_number == max_generation)) { full_gc_counts[gc_type_compacting]++; loh_alloc_since_cg = 0; } } #ifdef FEATURE_EVENT_TRACE if (informational_event_enabled_p) { gc_time_info[time_sweep] = GetHighPrecisionTimeStamp(); gc_time_info[time_plan] = gc_time_info[time_sweep] - gc_time_info[time_plan]; } #endif //FEATURE_EVENT_TRACE #endif //MULTIPLE_HEAPS if (!pm_trigger_full_gc && pm_stress_on && provisional_mode_triggered) { if ((settings.condemned_generation == (max_generation - 1)) && ((settings.gc_index % 5) == 0) #ifdef BACKGROUND_GC && !is_bgc_in_progress() #endif //BACKGROUND_GC ) { pm_trigger_full_gc = true; } } if (settings.condemned_generation == (max_generation - 1)) { if (provisional_mode_triggered) { if (should_expand) { should_expand = FALSE; dprintf (GTC_LOG, ("h%d in PM cannot expand", heap_number)); } } if (pm_trigger_full_gc) { should_compact = FALSE; dprintf (GTC_LOG, ("h%d PM doing sweeping", heap_number)); } } if (should_compact) { dprintf (2,( "**** Doing Compacting GC ****")); #if defined(USE_REGIONS) && defined(BACKGROUND_GC) if (should_update_end_mark_size()) { background_soh_size_end_mark += generation_end_seg_allocated (older_gen) - r_older_gen_end_seg_allocated; } #endif //USE_REGIONS && BACKGROUND_GC #ifndef USE_REGIONS if (should_expand) { #ifndef MULTIPLE_HEAPS heap_segment* new_heap_segment = soh_get_segment_to_expand(); #endif //!MULTIPLE_HEAPS if (new_heap_segment) { consing_gen = expand_heap(condemned_gen_number, consing_gen, new_heap_segment); } // If we couldn't get a new segment, or we were able to // reserve one but no space to commit, we couldn't // expand heap. if (ephemeral_heap_segment != new_heap_segment) { set_expand_in_full_gc (condemned_gen_number); should_expand = FALSE; } } #endif //!USE_REGIONS generation_allocation_limit (condemned_gen1) = generation_allocation_pointer (condemned_gen1); if ((condemned_gen_number < max_generation)) { generation_allocator (older_gen)->commit_alloc_list_changes(); // Fix the allocation area of the older generation fix_older_allocation_area (older_gen); #ifdef FEATURE_EVENT_TRACE if (record_fl_info_p) { // For plugs allocated in condemned we kept track of each one but only fire the // event for buckets with non zero items. 
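// [Editorial sketch - illustrative only, not part of the original gc.cpp source]
// The comment above and the loop that follows compact the per-bucket fit statistics so that
// only buckets with a non-zero count are handed to the GCFitBucketInfo event. A minimal,
// self-contained model of that in-place compaction; sketch_bucket and compact_non_zero are
// hypothetical names, and the integer types used are the ones already visible in this file.
struct gc_editorial_sketch_bucket_compaction
{
    struct sketch_bucket
    {
        uint16_t index;   // which size class this bucket represents
        size_t   count;   // how many items were recorded for it
        size_t   size;    // total bytes recorded for it
    };

    // Slide every non-empty bucket to the front of the array, preserving order, and return
    // how many there are - the caller only reports when this is non-zero.
    static uint16_t compact_non_zero (sketch_bucket* buckets, uint16_t num_buckets)
    {
        uint16_t non_zero = 0;
        for (uint16_t i = 0; i < num_buckets; i++)
        {
            if (buckets[i].count != 0)
            {
                if (i != non_zero)
                {
                    buckets[non_zero]       = buckets[i];   // move it down over the empty slots
                    buckets[non_zero].index = i;            // remember its original size class
                }
                else
                {
                    buckets[i].index = i;                   // already in place, just stamp the index
                }
                non_zero++;
            }
        }
        return non_zero;
    }
};
// [End editorial sketch]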
uint16_t non_zero_buckets = 0; for (uint16_t bucket_index = 0; bucket_index < NUM_GEN2_ALIST; bucket_index++) { if (bucket_info[bucket_index].count != 0) { if (bucket_index != non_zero_buckets) { bucket_info[non_zero_buckets].set (bucket_index, bucket_info[bucket_index].count, bucket_info[bucket_index].size); } else { bucket_info[bucket_index].index = bucket_index; } non_zero_buckets++; } } if (non_zero_buckets) { FIRE_EVENT(GCFitBucketInfo, (uint16_t)etw_bucket_kind::plugs_in_condemned, recorded_fl_info_size, non_zero_buckets, (uint32_t)(sizeof (etw_bucket_info)), (void *)bucket_info); init_bucket_info(); } // We want to get an idea of the sizes of free items in the top 25% of the free list // for gen2 (to be accurate - we stop as soon as the size we count exceeds 25%. This // is just so that if we have a really big free item we will still count that one). // The idea is we want to see if they all in a few big ones or many smaller ones? // To limit the amount of time we spend counting, we stop till we have counted the // top percentage, or exceeded max_etw_item_count items. size_t max_size_to_count = generation_free_list_space (older_gen) / 4; non_zero_buckets = generation_allocator (older_gen)->count_largest_items (bucket_info, max_size_to_count, max_etw_item_count, &recorded_fl_info_size); if (non_zero_buckets) { FIRE_EVENT(GCFitBucketInfo, (uint16_t)etw_bucket_kind::largest_fl_items, recorded_fl_info_size, non_zero_buckets, (uint32_t)(sizeof (etw_bucket_info)), (void *)bucket_info); } } #endif //FEATURE_EVENT_TRACE } #ifndef USE_REGIONS assert (generation_allocation_segment (consing_gen) == ephemeral_heap_segment); #endif //!USE_REGIONS GCToEEInterface::DiagWalkSurvivors(__this, true); relocate_phase (condemned_gen_number, first_condemned_address); compact_phase (condemned_gen_number, first_condemned_address, (!settings.demotion && settings.promotion)); fix_generation_bounds (condemned_gen_number, consing_gen); assert (generation_allocation_limit (youngest_generation) == generation_allocation_pointer (youngest_generation)); #ifndef USE_REGIONS if (condemned_gen_number >= (max_generation -1)) { #ifdef MULTIPLE_HEAPS // this needs be serialized just because we have one // segment_standby_list/seg_table for all heaps. We should make it at least // so that when hoarding is not on we don't need this join because // decommitting memory can take a long time. 
//must serialize on deleting segments gc_t_join.join(this, gc_join_rearrange_segs_compaction); if (gc_t_join.joined()) #endif //MULTIPLE_HEAPS { #ifdef FEATURE_EVENT_TRACE if (informational_event_enabled_p) { uint64_t current_time = GetHighPrecisionTimeStamp(); gc_time_info[time_compact] = current_time - gc_time_info[time_compact]; } #endif //FEATURE_EVENT_TRACE #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { g_heaps[i]->rearrange_heap_segments(TRUE); } #else //MULTIPLE_HEAPS rearrange_heap_segments(TRUE); #endif //MULTIPLE_HEAPS #ifdef MULTIPLE_HEAPS gc_t_join.restart(); #endif //MULTIPLE_HEAPS } if (should_expand) { //fix the start_segment for the ephemeral generations for (int i = 0; i < max_generation; i++) { generation* gen = generation_of (i); generation_start_segment (gen) = ephemeral_heap_segment; generation_allocation_segment (gen) = ephemeral_heap_segment; } } } #endif //!USE_REGIONS { #ifdef MULTIPLE_HEAPS dprintf(3, ("Joining after end of compaction")); gc_t_join.join(this, gc_join_adjust_handle_age_compact); if (gc_t_join.joined()) { #endif //MULTIPLE_HEAPS #ifdef FEATURE_EVENT_TRACE if (informational_event_enabled_p && (condemned_gen_number < (max_generation -1))) { uint64_t current_time = GetHighPrecisionTimeStamp(); gc_time_info[time_compact] = current_time - gc_time_info[time_compact]; } #endif //FEATURE_EVENT_TRACE #ifdef MULTIPLE_HEAPS //join all threads to make sure they are synchronized dprintf(3, ("Restarting after Promotion granted")); gc_t_join.restart(); } #endif //MULTIPLE_HEAPS #ifdef FEATURE_PREMORTEM_FINALIZATION finalize_queue->UpdatePromotedGenerations (condemned_gen_number, (!settings.demotion && settings.promotion)); #endif // FEATURE_PREMORTEM_FINALIZATION ScanContext sc; sc.thread_number = heap_number; sc.promotion = FALSE; sc.concurrent = FALSE; // new generations bounds are set can call this guy if (settings.promotion && !settings.demotion) { dprintf (2, ("Promoting EE roots for gen %d", condemned_gen_number)); GCScan::GcPromotionsGranted(condemned_gen_number, max_generation, &sc); } else if (settings.demotion) { dprintf (2, ("Demoting EE roots for gen %d", condemned_gen_number)); GCScan::GcDemote (condemned_gen_number, max_generation, &sc); } } { reset_pinned_queue_bos(); #ifndef USE_REGIONS unsigned int gen_number = min (max_generation, 1 + condemned_gen_number); generation* gen = generation_of (gen_number); uint8_t* low = generation_allocation_start (generation_of (gen_number-1)); uint8_t* high = heap_segment_allocated (ephemeral_heap_segment); #endif //!USE_REGIONS while (!pinned_plug_que_empty_p()) { mark* m = pinned_plug_of (deque_pinned_plug()); size_t len = pinned_len (m); uint8_t* arr = (pinned_plug (m) - len); dprintf(3,("free [%Ix %Ix[ pin", (size_t)arr, (size_t)arr + len)); if (len != 0) { assert (len >= Align (min_obj_size)); make_unused_array (arr, len); // fix fully contained bricks + first one // if the array goes beyond the first brick size_t start_brick = brick_of (arr); size_t end_brick = brick_of (arr + len); if (end_brick != start_brick) { dprintf (3, ("Fixing bricks [%Ix, %Ix[ to point to unused array %Ix", start_brick, end_brick, (size_t)arr)); set_brick (start_brick, arr - brick_address (start_brick)); size_t brick = start_brick+1; while (brick < end_brick) { set_brick (brick, start_brick - brick); brick++; } } #ifdef USE_REGIONS int gen_number = object_gennum_plan (arr); generation* gen = generation_of (gen_number); #else //when we take an old segment to make the new //ephemeral segment. 
we can have a bunch of //pinned plugs out of order going to the new ephemeral seg //and then the next plugs go back to max_generation if ((heap_segment_mem (ephemeral_heap_segment) <= arr) && (heap_segment_reserved (ephemeral_heap_segment) > arr)) { while ((low <= arr) && (high > arr)) { gen_number--; assert ((gen_number >= 1) || (demotion_low != MAX_PTR) || settings.demotion || !settings.promotion); dprintf (3, ("new free list generation %d", gen_number)); gen = generation_of (gen_number); if (gen_number >= 1) low = generation_allocation_start (generation_of (gen_number-1)); else low = high; } } else { dprintf (3, ("new free list generation %d", max_generation)); gen_number = max_generation; gen = generation_of (gen_number); } #endif //USE_REGIONS dprintf(3,("h%d threading %Ix (%Id) before pin in gen %d", heap_number, arr, len, gen_number)); thread_gap (arr, len, gen); add_gen_free (gen_number, len); } } } clear_gen1_cards(); } else { //force promotion for sweep settings.promotion = TRUE; settings.compaction = FALSE; #ifdef USE_REGIONS // This should be set for segs too actually. We should always reset demotion // if we sweep. settings.demotion = FALSE; #endif //USE_REGIONS ScanContext sc; sc.thread_number = heap_number; sc.promotion = FALSE; sc.concurrent = FALSE; dprintf (2, ("**** Doing Mark and Sweep GC****")); if ((condemned_gen_number < max_generation)) { #ifdef FREE_USAGE_STATS memcpy (older_gen->gen_free_spaces, r_older_gen_free_space, sizeof (r_older_gen_free_space)); #endif //FREE_USAGE_STATS generation_allocator (older_gen)->copy_from_alloc_list (r_free_list); generation_free_list_space (older_gen) = r_free_list_space; generation_free_obj_space (older_gen) = r_free_obj_space; #ifdef DOUBLY_LINKED_FL if (condemned_gen_number == (max_generation - 1)) { dprintf (2, ("[h%d] no undo, FL %Id-%Id -> %Id, FO %Id+%Id=%Id", heap_number, generation_free_list_space (older_gen), gen2_removed_no_undo, (generation_free_list_space (older_gen) - gen2_removed_no_undo), generation_free_obj_space (older_gen), gen2_removed_no_undo, (generation_free_obj_space (older_gen) + gen2_removed_no_undo))); generation_free_list_space (older_gen) -= gen2_removed_no_undo; generation_free_obj_space (older_gen) += gen2_removed_no_undo; } #endif //DOUBLY_LINKED_FL generation_free_list_allocated (older_gen) = r_older_gen_free_list_allocated; generation_end_seg_allocated (older_gen) = r_older_gen_end_seg_allocated; generation_condemned_allocated (older_gen) = r_older_gen_condemned_allocated; generation_sweep_allocated (older_gen) += dd_survived_size (dynamic_data_of (condemned_gen_number)); generation_allocation_limit (older_gen) = r_allocation_limit; generation_allocation_pointer (older_gen) = r_allocation_pointer; generation_allocation_context_start_region (older_gen) = r_allocation_start_region; generation_allocation_segment (older_gen) = r_allocation_segment; #ifdef USE_REGIONS if (older_gen->gen_num == max_generation) { check_seg_gen_num (r_allocation_segment); } #endif //USE_REGIONS } if ((condemned_gen_number < max_generation)) { // Fix the allocation area of the older generation fix_older_allocation_area (older_gen); } GCToEEInterface::DiagWalkSurvivors(__this, false); make_free_lists (condemned_gen_number); size_t total_recovered_sweep_size = recover_saved_pinned_info(); if (total_recovered_sweep_size > 0) { generation_free_obj_space (generation_of (max_generation)) -= total_recovered_sweep_size; dprintf (2, ("h%d: deduct %Id for pin, fo->%Id", heap_number, total_recovered_sweep_size, 
generation_free_obj_space (generation_of (max_generation)))); } #ifdef MULTIPLE_HEAPS dprintf(3, ("Joining after end of sweep")); gc_t_join.join(this, gc_join_adjust_handle_age_sweep); if (gc_t_join.joined()) #endif //MULTIPLE_HEAPS { #ifdef FEATURE_EVENT_TRACE if (informational_event_enabled_p) { uint64_t current_time = GetHighPrecisionTimeStamp(); gc_time_info[time_sweep] = current_time - gc_time_info[time_sweep]; } #endif //FEATURE_EVENT_TRACE if (!special_sweep_p) { GCScan::GcPromotionsGranted(condemned_gen_number, max_generation, &sc); } #ifndef USE_REGIONS if (condemned_gen_number >= (max_generation -1)) { #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { g_heaps[i]->rearrange_heap_segments(FALSE); } #else rearrange_heap_segments(FALSE); #endif //MULTIPLE_HEAPS } #endif //!USE_REGIONS #ifdef MULTIPLE_HEAPS //join all threads to make sure they are synchronized dprintf(3, ("Restarting after Promotion granted")); gc_t_join.restart(); #endif //MULTIPLE_HEAPS } #ifdef FEATURE_PREMORTEM_FINALIZATION if (!special_sweep_p) { finalize_queue->UpdatePromotedGenerations (condemned_gen_number, TRUE); } #endif // FEATURE_PREMORTEM_FINALIZATION if (!special_sweep_p) { clear_gen1_cards(); } } //verify_partial(); } #ifdef _PREFAST_ #pragma warning(pop) #endif //_PREFAST_ /***************************** Called after compact phase to fix all generation gaps ********************************/ void gc_heap::fix_generation_bounds (int condemned_gen_number, generation* consing_gen) { #ifndef _DEBUG UNREFERENCED_PARAMETER(consing_gen); #endif //_DEBUG int gen_number = condemned_gen_number; dprintf (2, ("---- thread regions gen%d GC ----", gen_number)); #ifdef USE_REGIONS // For ephemeral GCs, we handle up till the generation_allocation_segment as that's the last one we // changed in the older gen. 
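// [Editorial sketch - illustrative only, not part of the original gc.cpp source]
// For an ephemeral regions GC, the loop that follows walks the older generation's region list
// and commits each region's planned allocation pointer, stopping once the region the older
// generation was last allocating into has been handled. A minimal model of that walk under
// assumed names (sketch_region, commit_planned_up_to); the real code operates on heap_segment
// and stops at generation_allocation_segment.
struct gc_editorial_sketch_commit_planned
{
    struct sketch_region
    {
        uint8_t*       allocated;        // end of live data after the previous GC
        uint8_t*       plan_allocated;   // end of live data decided during plan_phase
        sketch_region* next;             // a generation's regions form a singly-linked list
    };

    static void commit_planned_up_to (sketch_region* head, sketch_region* stop)
    {
        for (sketch_region* r = head; r != nullptr; r = r->next)
        {
            r->allocated = r->plan_allocated;
            if (r == stop)
                break;                   // regions past this point were not touched by this GC
        }
    }
};
// [End editorial sketch]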
if (settings.promotion && (condemned_gen_number < max_generation)) { int older_gen_number = condemned_gen_number + 1; generation* older_gen = generation_of (older_gen_number); heap_segment* last_alloc_region = generation_allocation_segment (older_gen); dprintf (REGIONS_LOG, ("fix till we see alloc region which is %Ix", heap_segment_mem (last_alloc_region))); heap_segment* region = heap_segment_rw (generation_start_segment (older_gen)); while (region) { heap_segment_allocated (region) = heap_segment_plan_allocated (region); if (region == last_alloc_region) break; region = heap_segment_next (region); } } thread_final_regions (true); ephemeral_heap_segment = generation_start_segment (generation_of (0)); alloc_allocated = heap_segment_allocated (ephemeral_heap_segment); #else //USE_REGIONS assert (generation_allocation_segment (consing_gen) == ephemeral_heap_segment); int bottom_gen = 0; while (gen_number >= bottom_gen) { generation* gen = generation_of (gen_number); dprintf(3,("Fixing generation pointers for %Ix", gen_number)); if ((gen_number < max_generation) && ephemeral_promotion) { size_t saved_eph_start_size = saved_ephemeral_plan_start_size[gen_number]; make_unused_array (saved_ephemeral_plan_start[gen_number], saved_eph_start_size); generation_free_obj_space (generation_of (max_generation)) += saved_eph_start_size; dprintf (2, ("[h%d] EP %Ix(%Id)", heap_number, saved_ephemeral_plan_start[gen_number], saved_ephemeral_plan_start_size[gen_number])); } reset_allocation_pointers (gen, generation_plan_allocation_start (gen)); make_unused_array (generation_allocation_start (gen), generation_plan_allocation_start_size (gen)); dprintf(3,(" start %Ix", (size_t)generation_allocation_start (gen))); gen_number--; } #ifdef MULTIPLE_HEAPS if (ephemeral_promotion) { //we are creating a generation fault. set the cards. // and we are only doing this for multiple heaps because in the single heap scenario the // new ephemeral generations will be empty and there'll be no need to set cards for the // old ephemeral generations that got promoted into max_generation. 
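// [Editorial sketch - illustrative only, not part of the original gc.cpp source]
// The loop that follows stamps the card table over the old ephemeral range that was just
// promoted into max_generation, so that cross-generation pointers already stored there are
// still found by the next ephemeral GC. A minimal byte-per-card model of that marking; the
// 256-byte card size is an assumption of the sketch, and card_of / align_on_card / set_card
// in the real code hide the same arithmetic behind the actual card size and table layout.
struct gc_editorial_sketch_card_marking
{
    // Mark every card overlapping [start, end) as dirty; 'base' is the lowest address
    // covered by the table.
    static void set_cards (uint8_t* card_table, uint8_t* base, uint8_t* start, uint8_t* end)
    {
        const size_t card_size = 256;
        size_t card     = (size_t)(start - base) / card_size;
        size_t end_card = ((size_t)(end - base) + card_size - 1) / card_size;   // round the end up
        while (card != end_card)
        {
            card_table[card] = 1;        // dirty: the next ephemeral GC re-scans this card
            card++;
        }
    }
};
// [End editorial sketch]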
ptrdiff_t delta = 0; heap_segment* old_ephemeral_seg = seg_mapping_table_segment_of (saved_ephemeral_plan_start[max_generation-1]); assert (in_range_for_segment (saved_ephemeral_plan_start[max_generation-1], old_ephemeral_seg)); size_t end_card = card_of (align_on_card (heap_segment_plan_allocated (old_ephemeral_seg))); size_t card = card_of (saved_ephemeral_plan_start[max_generation-1]); while (card != end_card) { set_card (card); card++; } } #endif //MULTIPLE_HEAPS #ifdef BACKGROUND_GC if (should_update_end_mark_size()) { background_soh_size_end_mark = generation_size (max_generation); } #endif //BACKGROUND_GC #endif //!USE_REGIONS { alloc_allocated = heap_segment_plan_allocated(ephemeral_heap_segment); //reset the allocated size #ifdef _DEBUG uint8_t* start = get_soh_start_object (ephemeral_heap_segment, youngest_generation); if (settings.promotion && !settings.demotion) { assert ((start + get_soh_start_obj_len (start)) == heap_segment_plan_allocated(ephemeral_heap_segment)); } #endif //_DEBUG heap_segment_allocated(ephemeral_heap_segment)= heap_segment_plan_allocated(ephemeral_heap_segment); } } #ifndef USE_REGIONS uint8_t* gc_heap::generation_limit (int gen_number) { if (settings.promotion) { if (gen_number <= 1) return heap_segment_reserved (ephemeral_heap_segment); else return generation_allocation_start (generation_of ((gen_number - 2))); } else { if (gen_number <= 0) return heap_segment_reserved (ephemeral_heap_segment); else return generation_allocation_start (generation_of ((gen_number - 1))); } } #endif //!USE_REGIONS BOOL gc_heap::ensure_gap_allocation (int condemned_gen_number) { #ifndef USE_REGIONS uint8_t* start = heap_segment_allocated (ephemeral_heap_segment); size_t size = Align (min_obj_size)*(condemned_gen_number+1); assert ((start + size) <= heap_segment_reserved (ephemeral_heap_segment)); if ((start + size) > heap_segment_committed (ephemeral_heap_segment)) { if (!grow_heap_segment (ephemeral_heap_segment, start + size)) { return FALSE; } } #endif //USE_REGIONS return TRUE; } uint8_t* gc_heap::allocate_at_end (size_t size) { uint8_t* start = heap_segment_allocated (ephemeral_heap_segment); size = Align (size); uint8_t* result = start; // only called to allocate a min obj so can't overflow here. assert ((start + size) <= heap_segment_reserved (ephemeral_heap_segment)); //ensure_gap_allocation took care of it assert ((start + size) <= heap_segment_committed (ephemeral_heap_segment)); heap_segment_allocated (ephemeral_heap_segment) += size; return result; } #ifdef USE_REGIONS // Find the first non empty region and also does the following in the process - // + decommit end of region if it's not a gen0 region; // + set the region gen_num to the new one; // // For empty regions, we always return empty regions to free unless it's a gen // start region. Note that I'm returning gen0 empty regions as well, however, // returning a region to free does not decommit. // // If this is called for a compacting GC, we know we always take the planned generation // on the region (and set the new allocated); else this is called for sweep in which case // it's more complicated - // // + if we are in the special sweep mode, we don't change the old gen number at all // + if we are not in special sweep we need to promote all regions, including the SIP ones // because we make the assumption that this is the case for sweep for handles. 
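// [Editorial sketch - illustrative only, not part of the original gc.cpp source]
// find_first_valid_region below interleaves several jobs (adjusting gen numbers, decommitting
// tail space, threading swept-in-plan free lists), but its core control flow is a
// skip-and-recycle walk over the generation's region list: fully empty regions go back to the
// free pool and the walk stops at the first region that still holds survivors. A minimal model
// of just that walk, with return_to_free standing in for return_free_region and sketch_region
// standing in for heap_segment.
struct gc_editorial_sketch_skip_empty_regions
{
    struct sketch_region
    {
        uint8_t*       mem;          // first usable byte of the region
        uint8_t*       allocated;    // end of surviving data; == mem means the region is empty
        sketch_region* next;
    };

    typedef void (*recycle_fn) (sketch_region*);

    static sketch_region* first_non_empty (sketch_region* region, recycle_fn return_to_free)
    {
        while (region != nullptr && region->allocated == region->mem)
        {
            sketch_region* empty = region;
            region = region->next;
            return_to_free (empty);   // recycled for reuse, not decommitted
        }
        return region;                // nullptr if the whole generation turned out empty
    }
};
// [End editorial sketch]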
heap_segment* gc_heap::find_first_valid_region (heap_segment* region, bool compact_p) { check_seg_gen_num (generation_allocation_segment (generation_of (max_generation))); dprintf (REGIONS_LOG, (" FFVR region %Ix(%Ix), gen%d", (size_t)region, (region ? heap_segment_mem (region) : 0), (region ? heap_segment_gen_num (region) : 0))); if (!region) return 0; heap_segment* current_region = region; do { int gen_num = heap_segment_gen_num (current_region); int plan_gen_num = -1; if (compact_p) { assert (settings.compaction); plan_gen_num = heap_segment_plan_gen_num (current_region); dprintf (REGIONS_LOG, (" gen%d->%d", gen_num, plan_gen_num)); } else { plan_gen_num = (special_sweep_p ? gen_num : get_plan_gen_num (gen_num)); dprintf (REGIONS_LOG, (" gen%d->%d, special_sweep_p %d, swept_in_plan %d", gen_num, plan_gen_num, (int)special_sweep_p, (int)heap_segment_swept_in_plan (current_region))); } uint8_t* allocated = (compact_p ? heap_segment_plan_allocated (current_region) : heap_segment_allocated (current_region)); if (heap_segment_mem (current_region) == allocated) { heap_segment* region_to_delete = current_region; current_region = heap_segment_next (current_region); return_free_region (region_to_delete); dprintf (REGIONS_LOG, (" h%d gen%d return region %Ix to free, current->%Ix(%Ix)", heap_number, gen_num, heap_segment_mem (region_to_delete), current_region, (current_region ? heap_segment_mem (current_region) : 0))); if (!current_region) return 0; } else { if (compact_p) { dprintf (REGIONS_LOG, (" gen%d setting region %Ix alloc %Ix to plan %Ix", gen_num, heap_segment_mem (current_region), heap_segment_allocated (current_region), heap_segment_plan_allocated (current_region))); if (heap_segment_swept_in_plan (current_region)) { assert (heap_segment_allocated (current_region) == heap_segment_plan_allocated (current_region)); } else { heap_segment_allocated (current_region) = heap_segment_plan_allocated (current_region); } } else { // Set this so we keep plan gen and gen the same. set_region_plan_gen_num (current_region, plan_gen_num); } if (gen_num >= soh_gen2) { dprintf (REGIONS_LOG, (" gen%d decommit end of region %Ix(%Ix)", gen_num, current_region, heap_segment_mem (current_region))); decommit_heap_segment_pages (current_region, 0); } dprintf (REGIONS_LOG, (" set region %Ix(%Ix) gen num to %d", current_region, heap_segment_mem (current_region), plan_gen_num)); set_region_gen_num (current_region, plan_gen_num); break; } } while (current_region); assert (current_region); if (heap_segment_swept_in_plan (current_region)) { int gen_num = heap_segment_gen_num (current_region); dprintf (REGIONS_LOG, ("threading SIP region %Ix surv %Id onto gen%d", heap_segment_mem (current_region), heap_segment_survived (current_region), gen_num)); generation* gen = generation_of (gen_num); generation_allocator (gen)->thread_sip_fl (current_region); generation_free_list_space (gen) += heap_segment_free_list_size (current_region); generation_free_obj_space (gen) += heap_segment_free_obj_size (current_region); } // Take this opportunity to make sure all the regions left with flags only for this GC are reset. 
heap_segment_swept_in_plan (current_region) = false; current_region->flags &= ~heap_segment_flags_demoted; return current_region; } void gc_heap::thread_final_regions (bool compact_p) { for (int i = 0; i < max_generation; i++) { if (reserved_free_regions_sip[i]) { return_free_region (reserved_free_regions_sip[i]); } } int condemned_gen_number = settings.condemned_generation; generation_region_info generation_final_regions[max_generation + 1]; memset (generation_final_regions, 0, sizeof (generation_final_regions)); // Step 1: we initialize all the regions for generations we are not condemning with their // current head and tail as we know these regions will for sure exist. for (int gen_idx = max_generation; gen_idx > condemned_gen_number; gen_idx--) { generation* gen = generation_of (gen_idx); // Note this needs to be the first rw region as we will not be changing any ro regions and // we will work on thread rw regions here. generation_final_regions[gen_idx].head = heap_segment_rw (generation_start_segment (gen)); generation_final_regions[gen_idx].tail = generation_tail_region (gen); } #ifdef BACKGROUND_GC heap_segment* max_gen_tail_region = 0; if (should_update_end_mark_size()) { max_gen_tail_region = generation_final_regions[max_generation].tail; } #endif //BACKGROUND_GC // Step 2: for each region in the condemned generations, we thread it onto its planned generation // in our generation_final_regions array. for (int gen_idx = condemned_gen_number; gen_idx >= 0; gen_idx--) { heap_segment* current_region = heap_segment_rw (generation_start_segment (generation_of (gen_idx))); dprintf (REGIONS_LOG, ("gen%d start from %Ix", gen_idx, heap_segment_mem (current_region))); while ((current_region = find_first_valid_region (current_region, compact_p))) { assert (!compact_p || (heap_segment_plan_gen_num (current_region) == heap_segment_gen_num (current_region))); int new_gen_num = heap_segment_plan_gen_num (current_region); generation* new_gen = generation_of (new_gen_num); heap_segment* next_region = heap_segment_next (current_region); if (generation_final_regions[new_gen_num].head) { assert (generation_final_regions[new_gen_num].tail); // The new gen already exists, just thread this region onto it. dprintf (REGIONS_LOG, ("gen%d exists, tail region %Ix next -> %Ix", new_gen_num, heap_segment_mem (generation_final_regions[new_gen_num].tail), heap_segment_mem (current_region))); heap_segment_next (generation_final_regions[new_gen_num].tail) = current_region; generation_final_regions[new_gen_num].tail = current_region; } else { generation_final_regions[new_gen_num].head = current_region; generation_final_regions[new_gen_num].tail = current_region; } current_region = next_region; } } // Step 3: all the tail regions' next needs to be set to 0. 
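// [Editorial sketch - illustrative only, not part of the original gc.cpp source]
// Step 2 above threads each surviving condemned region onto the head/tail pair recorded for
// its planned generation, and the Step 3 loop right below null-terminates every list. A
// minimal model of that head/tail append under assumed names; the real code keeps the pairs
// in generation_final_regions and links through heap_segment_next.
struct gc_editorial_sketch_head_tail_threading
{
    struct sketch_region { sketch_region* next; };

    struct sketch_list
    {
        sketch_region* head;
        sketch_region* tail;
    };

    // Append 'region' to the list, starting it if this is the first region threaded on.
    // (When head is non-null, tail is non-null by construction.)
    static void append (sketch_list& gen_list, sketch_region* region)
    {
        if (gen_list.head != nullptr)
            gen_list.tail->next = region;
        else
            gen_list.head = region;
        gen_list.tail = region;
        // The lists are only terminated afterwards (Step 3 sets each tail's next to 0).
    }
};
// [End editorial sketch]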
for (int gen_idx = 0; gen_idx <= max_generation; gen_idx++) { generation* gen = generation_of (gen_idx); if (generation_final_regions[gen_idx].tail) { heap_segment_next (generation_final_regions[gen_idx].tail) = 0; //if (heap_segment_next (generation_final_regions[gen_idx].tail) != 0) //{ // dprintf (REGIONS_LOG, ("tail->next is %Ix", // heap_segment_next (generation_final_regions[gen_idx].tail))); // GCToOSInterface::DebugBreak(); //} } } #ifdef BACKGROUND_GC if (max_gen_tail_region) { max_gen_tail_region = heap_segment_next (max_gen_tail_region); while (max_gen_tail_region) { background_soh_size_end_mark += heap_segment_allocated (max_gen_tail_region) - heap_segment_mem (max_gen_tail_region); max_gen_tail_region = heap_segment_next (max_gen_tail_region); } } #endif //BACKGROUND_GC // Step 4: if a generation doesn't have any regions, we need to get a new one for it; // otherwise we just set the head region as the start region for that generation. for (int gen_idx = 0; gen_idx <= max_generation; gen_idx++) { bool condemned_p = (gen_idx <= condemned_gen_number); assert (condemned_p || generation_final_regions[gen_idx].head); generation* gen = generation_of (gen_idx); heap_segment* start_region = 0; if (generation_final_regions[gen_idx].head) { if (condemned_p) { start_region = generation_final_regions[gen_idx].head; thread_start_region (gen, start_region); } generation_tail_region (gen) = generation_final_regions[gen_idx].tail; dprintf (REGIONS_LOG, ("setting gen%d start %Ix, tail %Ix", gen_idx, heap_segment_mem (heap_segment_rw (generation_start_segment (gen))), heap_segment_mem (generation_tail_region (gen)))); } else { start_region = get_free_region (gen_idx); thread_start_region (gen, start_region); dprintf (REGIONS_LOG, ("creating new gen%d at %Ix", gen_idx, heap_segment_mem (start_region))); } if (condemned_p) { uint8_t* gen_start = heap_segment_mem (start_region); reset_allocation_pointers (gen, gen_start); } } verify_regions (true, false); } void gc_heap::thread_start_region (generation* gen, heap_segment* region) { heap_segment* prev_region = generation_tail_ro_region (gen); if (prev_region) { heap_segment_next (prev_region) = region; dprintf (REGIONS_LOG,("gen%d tail ro %Ix(%Ix) next -> %Ix(%Ix)", gen->gen_num, (size_t)prev_region, heap_segment_mem (prev_region), (size_t)region, heap_segment_mem (region))); } else { generation_start_segment (gen) = region; dprintf (REGIONS_LOG, ("start region of gen%d -> %Ix(%Ix)", gen->gen_num, (size_t)region, heap_segment_mem (region))); } dprintf (REGIONS_LOG, ("tail region of gen%d -> %Ix(%Ix)", gen->gen_num, (size_t)region, heap_segment_mem (region))); generation_tail_region (gen) = region; } heap_segment* gc_heap::get_new_region (int gen_number, size_t size) { heap_segment* new_region = get_free_region (gen_number, size); if (new_region) { switch (gen_number) { default: assert ((new_region->flags & (heap_segment_flags_loh | heap_segment_flags_poh)) == 0); break; case loh_generation: new_region->flags |= heap_segment_flags_loh; break; case poh_generation: new_region->flags |= heap_segment_flags_poh; break; } generation* gen = generation_of (gen_number); heap_segment_next (generation_tail_region (gen)) = new_region; generation_tail_region (gen) = new_region; verify_regions (gen_number, false, settings.concurrent); } return new_region; } heap_segment* gc_heap::allocate_new_region (gc_heap* hp, int gen_num, bool uoh_p, size_t size) { uint8_t* start = 0; uint8_t* end = 0; // size parameter should be non-zero only for large regions assert (uoh_p 
|| size == 0); // REGIONS TODO: allocate POH regions on the right bool allocated_p = (uoh_p ? global_region_allocator.allocate_large_region (&start, &end, allocate_forward, size, on_used_changed) : global_region_allocator.allocate_basic_region (&start, &end, on_used_changed)); if (!allocated_p) { return 0; } heap_segment* res = make_heap_segment (start, (end - start), hp, gen_num); dprintf (REGIONS_LOG, ("got a new region %Ix %Ix->%Ix", (size_t)res, start, end)); if (res == nullptr) { global_region_allocator.delete_region (start); } return res; } void gc_heap::update_start_tail_regions (generation* gen, heap_segment* region_to_delete, heap_segment* prev_region, heap_segment* next_region) { if (region_to_delete == heap_segment_rw (generation_start_segment (gen))) { assert (!prev_region); heap_segment* tail_ro_region = generation_tail_ro_region (gen); if (tail_ro_region) { heap_segment_next (tail_ro_region) = next_region; dprintf (REGIONS_LOG, ("gen%d tail ro %Ix(%Ix) next updated to %Ix(%Ix)", gen->gen_num, (size_t)tail_ro_region, heap_segment_mem (tail_ro_region), (size_t)next_region, heap_segment_mem (next_region))); } else { generation_start_segment (gen) = next_region; dprintf (REGIONS_LOG, ("start region of gen%d updated to %Ix(%Ix)", gen->gen_num, (size_t)next_region, heap_segment_mem (next_region))); } } if (region_to_delete == generation_tail_region (gen)) { assert (!next_region); generation_tail_region (gen) = prev_region; dprintf (REGIONS_LOG, ("tail region of gen%d updated to %Ix(%Ix)", gen->gen_num, (size_t)prev_region, heap_segment_mem (prev_region))); } verify_regions (false, settings.concurrent); } // There's one complication with deciding whether we can make a region SIP or not - if the plan_gen_num of // a generation is not maxgen, and if we want to make every region in that generation maxgen, we need to // make sure we can get a new region for this generation so we can guarantee each generation has at least // one region. If we can't get a new region, we need to make sure we leave at least one region in that gen // to guarantee our invariant. // // This new region we get needs to be temporarily recorded instead of being on the free_regions list because // we can't use it for other purposes. inline bool gc_heap::should_sweep_in_plan (heap_segment* region) { bool sip_p = false; int gen_num = get_region_gen_num (region); int new_gen_num = get_plan_gen_num (gen_num); heap_segment_swept_in_plan (region) = false; dprintf (REGIONS_LOG, ("checking if region %Ix should be SIP", heap_segment_mem (region))); #ifdef STRESS_REGIONS // Only do this for testing or it would keep too much swept. 
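// [Editorial sketch - illustrative only, not part of the original gc.cpp source]
// Outside of the stress path guarded here, should_sweep_in_plan (continuing below) decides by
// comparing the region's survived bytes against a percentage of the basic region size, with a
// separate old-card-survival test deciding promotion to gen2. A minimal model of the ratio
// test; threshold_pct corresponds to sip_surv_ratio_th in the real code and region_size to
// the basic region size (non-zero).
struct gc_editorial_sketch_sip_decision
{
    // If most of the region survived, compacting it would move nearly everything anyway, so
    // sweeping it in place is the cheaper choice.
    static bool survival_says_sip (size_t survived_bytes, size_t region_size, int threshold_pct)
    {
        int surv_ratio = (int)(((double)survived_bytes * 100.0) / (double)region_size);
        return surv_ratio >= threshold_pct;
    }
};
// [End editorial sketch]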
if (0) { num_condemned_regions++; if ((num_condemned_regions % sip_seg_interval) == 0) { set_region_plan_gen_num (region, new_gen_num); sip_p = true; } if ((num_condemned_regions % sip_seg_maxgen_interval) == 0) { set_region_plan_gen_num (region, max_generation); sip_maxgen_regions_per_gen[gen_num]++; sip_p = true; } } else #endif //STRESS_REGIONS { size_t basic_region_size = (size_t)1 << min_segment_size_shr; assert (heap_segment_gen_num (region) == heap_segment_plan_gen_num (region)); int surv_ratio = (int)(((double)heap_segment_survived (region) * 100.0) / (double)basic_region_size); dprintf (2222, ("SSIP: region %Ix surv %Id / %Id = %d%%(%d)", heap_segment_mem (region), heap_segment_survived (region), basic_region_size, surv_ratio, sip_surv_ratio_th)); if (surv_ratio >= sip_surv_ratio_th) { set_region_plan_gen_num (region, new_gen_num); sip_p = true; } if (new_gen_num < max_generation) { int old_card_surv_ratio = (int)(((double)heap_segment_old_card_survived (region) * 100.0) / (double)basic_region_size); dprintf (2222, ("SSIP: region %Ix old card surv %Id / %Id = %d%%(%d)", heap_segment_mem (region), heap_segment_old_card_survived (region), basic_region_size, old_card_surv_ratio, sip_surv_ratio_th)); if (old_card_surv_ratio >= sip_old_card_surv_ratio_th) { set_region_plan_gen_num (region, max_generation); sip_maxgen_regions_per_gen[gen_num]++; sip_p = true; } } } if (sip_p) { num_sip_regions++; if ((new_gen_num < max_generation) && (sip_maxgen_regions_per_gen[gen_num] == regions_per_gen[gen_num])) { assert (get_region_gen_num (region) == 0); assert (new_gen_num < max_generation); heap_segment* reserved_free_region = get_free_region (gen_num); if (reserved_free_region) { dprintf (REGIONS_LOG, ("all regions in gen%d -> SIP 2, get a new region for it %Ix", gen_num, heap_segment_mem (reserved_free_region))); reserved_free_regions_sip[gen_num] = reserved_free_region; } else { // If we cannot get another region, simply revert our decision. sip_maxgen_regions_per_gen[gen_num]--; set_region_plan_gen_num (region, new_gen_num); } } } dprintf (REGIONS_LOG, ("region %Ix %s SIP", heap_segment_mem (region), (sip_p ? "is" : "is not"))); return sip_p; } void heap_segment::thread_free_obj (uint8_t* obj, size_t s) { //dprintf (REGIONS_LOG, ("threading SIP free obj %Ix-%Ix(%Id)", obj, (obj + s), s)); if (s >= min_free_list) { free_list_slot (obj) = 0; if (free_list_head) { assert (free_list_tail); free_list_slot (free_list_tail) = obj; } else { free_list_head = obj; } free_list_tail = obj; free_list_size += s; } else { free_obj_size += s; } } // For a region that we sweep in plan, we need to do the following - // // + set the swept_in_plan_p for this region. // + update allocated for this region. // + build bricks. // + build free objects. We keep a list of them which will then be threaded onto the appropriate generation's // free list. This can be optimized, both gen0 and gen2 GCs are easy to handle - need to see how easy it is // to handle gen1 GCs as the commit/repair there is complicated. // // in plan_phase we also need to make sure to not call update_brick_table when handling end of this region, // and the plan gen num is set accordingly. 
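// [Editorial sketch - illustrative only, not part of the original gc.cpp source]
// thread_free_obj above, which sweep_region_in_plan (next) calls for every dead gap it finds,
// keeps a per-region free list with a head, a tail and a minimum-size cutoff: gaps at or
// above the cutoff are linked for later allocation, smaller ones are only counted as unusable
// free space. A minimal model of that bookkeeping; storing the next pointer in the gap's
// first slot is a simplification (real free objects carry a free-object header first), and
// min_free mirrors min_free_list.
struct gc_editorial_sketch_free_gap_list
{
    uint8_t* head           = nullptr;
    uint8_t* tail           = nullptr;
    size_t   free_list_size = 0;        // bytes that can satisfy future allocations
    size_t   free_obj_size  = 0;        // bytes too small to be worth threading

    static uint8_t*& next_of (uint8_t* gap) { return *reinterpret_cast<uint8_t**>(gap); }

    void thread_free_gap (uint8_t* gap, size_t size, size_t min_free)
    {
        if (size >= min_free)
        {
            next_of (gap) = nullptr;
            if (head == nullptr) head = gap;
            else                 next_of (tail) = gap;
            tail = gap;
            free_list_size += size;
        }
        else
        {
            free_obj_size += size;      // tracked for accounting, never allocated from
        }
    }
};
// [End editorial sketch]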
void gc_heap::sweep_region_in_plan (heap_segment* region, BOOL use_mark_list, uint8_t**& mark_list_next, uint8_t** mark_list_index) { heap_segment_swept_in_plan (region) = true; region->init_free_list(); uint8_t* x = heap_segment_mem (region); uint8_t* last_marked_obj_start = 0; uint8_t* last_marked_obj_end = 0; uint8_t* end = heap_segment_allocated (region); dprintf (2222, ("h%d region %Ix->%Ix SIP, gen %d->%d, %s mark list(%Ix->%Ix, %Ix->%Ix)", heap_number, x, end, heap_segment_gen_num (region), heap_segment_plan_gen_num (region), (use_mark_list ? "using" : "not using"), (uint8_t*)mark_list_next, (mark_list_next ? *mark_list_next : 0), (uint8_t*)mark_list_index, (mark_list_index ? *mark_list_index : 0))); #ifdef _DEBUG size_t survived = 0; uint8_t* saved_last_unmarked_obj_start = 0; uint8_t* saved_last_unmarked_obj_end = 0; size_t saved_obj_brick = 0; size_t saved_next_obj_brick = 0; #endif //_DEBUG while (x < end) { uint8_t* obj = x; size_t obj_brick = (size_t)obj / brick_size; uint8_t* next_obj = 0; if (marked (obj)) { if (pinned(obj)) { clear_pinned (obj); } clear_marked (obj); size_t s = size (obj); next_obj = obj + Align (s); last_marked_obj_start = obj; last_marked_obj_end = next_obj; #ifdef _DEBUG survived += s; #endif //_DEBUG dprintf (4444, ("M: %Ix-%Ix(%Id)", obj, next_obj, s)); } else { next_obj = find_next_marked (x, end, use_mark_list, mark_list_next, mark_list_index); #ifdef _DEBUG saved_last_unmarked_obj_start = obj; saved_last_unmarked_obj_end = next_obj; #endif //_DEBUG if ((next_obj > obj) && (next_obj != end)) { size_t free_obj_size = next_obj - obj; make_unused_array (obj, free_obj_size); region->thread_free_obj (obj, free_obj_size); dprintf (4444, ("UM threading: %Ix-%Ix(%Id)", obj, next_obj, (next_obj - obj))); } } size_t next_obj_brick = (size_t)next_obj / brick_size; #ifdef _DEBUG saved_obj_brick = obj_brick; saved_next_obj_brick = next_obj_brick; #endif //_DEBUG if (next_obj_brick != obj_brick) { fix_brick_to_highest (obj, next_obj); } x = next_obj; } if (last_marked_obj_start) { // We only need to make sure we fix the brick the last marked object's end is in. // Note this brick could have been fixed already. size_t last_marked_obj_start_b = brick_of (last_marked_obj_start); size_t last_marked_obj_end_b = brick_of (last_marked_obj_end - 1); dprintf (REGIONS_LOG, ("last live obj %Ix(%Ix)-%Ix, fixing its brick(s) %Ix-%Ix", last_marked_obj_start, method_table (last_marked_obj_start), last_marked_obj_end, last_marked_obj_start_b, last_marked_obj_end_b)); if (last_marked_obj_start_b == last_marked_obj_end_b) { set_brick (last_marked_obj_start_b, (last_marked_obj_start - brick_address (last_marked_obj_start_b))); } else { set_brick (last_marked_obj_end_b, (last_marked_obj_start_b - last_marked_obj_end_b)); } } else { last_marked_obj_end = heap_segment_mem (region); } #ifdef _DEBUG size_t region_index = get_basic_region_index_for_address (heap_segment_mem (region)); dprintf (REGIONS_LOG, ("region #%d %Ix survived %Id, %s recorded %Id", region_index, heap_segment_mem (region), survived, ((survived == heap_segment_survived (region)) ? 
"same as" : "diff from"), heap_segment_survived (region))); #ifdef MULTIPLE_HEAPS assert (survived <= (size_t)heap_segment_survived (region)); #else assert (survived == (size_t)heap_segment_survived (region)); #endif //MULTIPLE_HEAPS #endif //_DEBUG assert (last_marked_obj_end); heap_segment_saved_allocated (region) = heap_segment_allocated (region); heap_segment_allocated (region) = last_marked_obj_end; heap_segment_plan_allocated (region) = heap_segment_allocated (region); int plan_gen_num = heap_segment_plan_gen_num (region); generation_allocation_size (generation_of (plan_gen_num)) += heap_segment_survived (region); dprintf (REGIONS_LOG, ("sip: g%d alloc size is now %Id", plan_gen_num, generation_allocation_size (generation_of (plan_gen_num)))); } inline void gc_heap::check_demotion_helper_sip (uint8_t** pval, int parent_gen_num, uint8_t* parent_loc) { uint8_t* child_object = *pval; if (!is_in_heap_range (child_object)) return; if (!child_object) return; int child_object_plan_gen = get_region_plan_gen_num (child_object); if (child_object_plan_gen < parent_gen_num) { set_card (card_of (parent_loc)); } dprintf (3, ("SCS %d, %d", child_object_plan_gen, parent_gen_num)); } heap_segment* gc_heap::relocate_advance_to_non_sip (heap_segment* region) { THREAD_FROM_HEAP; heap_segment* current_region = region; dprintf (REGIONS_LOG, ("Relocate searching for next non SIP, starting from %Ix", (region ? heap_segment_mem (region) : 0))); while (current_region) { if (heap_segment_swept_in_plan (current_region)) { int gen_num = heap_segment_gen_num (current_region); int plan_gen_num = heap_segment_plan_gen_num (current_region); bool use_sip_demotion = (plan_gen_num > get_plan_gen_num (gen_num)); dprintf (REGIONS_LOG, ("region %Ix is SIP, relocating, gen %d, plan gen: %d(supposed to be %d) %s", heap_segment_mem (current_region), gen_num, plan_gen_num, get_plan_gen_num (gen_num), (use_sip_demotion ? "Sd" : "d"))); uint8_t* x = heap_segment_mem (current_region); uint8_t* end = heap_segment_allocated (current_region); // For SIP regions, we go linearly in the region and relocate each object's references. 
while (x < end) { size_t s = size (x); assert (s > 0); uint8_t* next_obj = x + Align (s); Prefetch (next_obj); if (!(((CObjectHeader*)x)->IsFree())) { //relocate_obj_helper (x, s); if (contain_pointers (x)) { dprintf (3, ("$%Ix$", (size_t)x)); go_through_object_nostart (method_table(x), x, s, pval, { uint8_t* child = *pval; //reloc_survivor_helper (pval); relocate_address (pval THREAD_NUMBER_ARG); if (use_sip_demotion) check_demotion_helper_sip (pval, plan_gen_num, (uint8_t*)pval); else check_demotion_helper (pval, (uint8_t*)pval); if (child) { dprintf (4444, ("SIP %Ix(%Ix)->%Ix->%Ix(%Ix)", x, (uint8_t*)pval, child, *pval, method_table (child))); } }); } check_class_object_demotion (x); } x = next_obj; } } else { int gen_num = heap_segment_gen_num (current_region); int plan_gen_num = heap_segment_plan_gen_num (current_region); dprintf (REGIONS_LOG, ("region %Ix is not SIP, relocating, gen %d, plan gen: %d", heap_segment_mem (current_region), gen_num, plan_gen_num)); return current_region; } current_region = heap_segment_next (current_region); } return 0; } #ifdef STRESS_REGIONS void gc_heap::pin_by_gc (uint8_t* object) { heap_segment* region = region_of (object); HndAssignHandleGC(pinning_handles_for_alloc[ph_index_per_heap], object); dprintf (REGIONS_LOG, ("h%d pinning object at %Ix on eph seg %Ix (ph#%d)", heap_number, object, heap_segment_mem (region), ph_index_per_heap)); ph_index_per_heap++; if (ph_index_per_heap == PINNING_HANDLE_INITIAL_LENGTH) { ph_index_per_heap = 0; } } #endif //STRESS_REGIONS #endif //USE_REGIONS void gc_heap::make_free_lists (int condemned_gen_number) { //Promotion has to happen in sweep case. assert (settings.promotion); make_free_args args; int stop_gen_idx = get_stop_generation_index (condemned_gen_number); for (int i = condemned_gen_number; i >= stop_gen_idx; i--) { generation* condemned_gen = generation_of (i); heap_segment* current_heap_segment = get_start_segment (condemned_gen); #ifdef USE_REGIONS if (!current_heap_segment) continue; #endif //USE_REGIONS uint8_t* start_address = get_soh_start_object (current_heap_segment, condemned_gen); size_t current_brick = brick_of (start_address); PREFIX_ASSUME(current_heap_segment != NULL); uint8_t* end_address = heap_segment_allocated (current_heap_segment); size_t end_brick = brick_of (end_address-1); int current_gen_num = i; args.free_list_gen_number = (special_sweep_p ? current_gen_num : get_plan_gen_num (current_gen_num)); args.free_list_gen = generation_of (args.free_list_gen_number); args.highest_plug = 0; #ifdef USE_REGIONS dprintf (REGIONS_LOG, ("starting at gen%d %Ix -> %Ix", i, start_address, end_address)); #else assert (!special_sweep_p); args.current_gen_limit = (((current_gen_num == max_generation)) ? 
MAX_PTR : (generation_limit (args.free_list_gen_number))); #endif //USE_REGIONS #ifndef USE_REGIONS if ((start_address >= end_address) && (condemned_gen_number < max_generation)) { break; } #endif //!USE_REGIONS while (1) { if ((current_brick > end_brick)) { #ifndef USE_REGIONS if (args.current_gen_limit == MAX_PTR) { //We had an empty segment //need to allocate the generation start generation* gen = generation_of (max_generation); heap_segment* start_seg = heap_segment_rw (generation_start_segment (gen)); PREFIX_ASSUME(start_seg != NULL); uint8_t* gap = heap_segment_mem (start_seg); generation_allocation_start (gen) = gap; heap_segment_allocated (start_seg) = gap + Align (min_obj_size); make_unused_array (gap, Align (min_obj_size)); reset_allocation_pointers (gen, gap); dprintf (3, ("Start segment empty, fixing generation start of %d to: %Ix", max_generation, (size_t)gap)); args.current_gen_limit = generation_limit (args.free_list_gen_number); } #endif //!USE_REGIONS if (heap_segment_next_non_sip (current_heap_segment)) { current_heap_segment = heap_segment_next_non_sip (current_heap_segment); } else { break; } current_brick = brick_of (heap_segment_mem (current_heap_segment)); end_brick = brick_of (heap_segment_allocated (current_heap_segment)-1); continue; } { int brick_entry = brick_table [ current_brick ]; if ((brick_entry >= 0)) { make_free_list_in_brick (brick_address (current_brick) + brick_entry-1, &args); dprintf(3,("Fixing brick entry %Ix to %Ix", current_brick, (size_t)args.highest_plug)); set_brick (current_brick, (args.highest_plug - brick_address (current_brick))); } else { if ((brick_entry > -32768)) { #ifdef _DEBUG ptrdiff_t offset = brick_of (args.highest_plug) - current_brick; if ((brick_entry != -32767) && (! ((offset == brick_entry)))) { assert ((brick_entry == -1)); } #endif //_DEBUG //init to -1 for faster find_first_object set_brick (current_brick, -1); } } } current_brick++; } } { #ifdef USE_REGIONS check_seg_gen_num (generation_allocation_segment (generation_of (max_generation))); thread_final_regions (false); generation* gen_gen0 = generation_of (0); ephemeral_heap_segment = generation_start_segment (gen_gen0); alloc_allocated = heap_segment_allocated (ephemeral_heap_segment); // Since we didn't compact, we should recalculate the end_gen0_region_space. end_gen0_region_space = get_gen0_end_space(); #else //USE_REGIONS int bottom_gen = 0; args.free_list_gen_number--; while (args.free_list_gen_number >= bottom_gen) { uint8_t* gap = 0; generation* gen2 = generation_of (args.free_list_gen_number); gap = allocate_at_end (Align(min_obj_size)); generation_allocation_start (gen2) = gap; reset_allocation_pointers (gen2, gap); dprintf(3,("Fixing generation start of %d to: %Ix", args.free_list_gen_number, (size_t)gap)); PREFIX_ASSUME(gap != NULL); make_unused_array (gap, Align (min_obj_size)); args.free_list_gen_number--; } //reset the allocated size uint8_t* start2 = generation_allocation_start (youngest_generation); alloc_allocated = start2 + Align (size (start2)); #endif //USE_REGIONS } } void gc_heap::make_free_list_in_brick (uint8_t* tree, make_free_args* args) { assert ((tree != NULL)); { int right_node = node_right_child (tree); int left_node = node_left_child (tree); args->highest_plug = 0; if (! (0 == tree)) { if (! 
(0 == left_node)) { make_free_list_in_brick (tree + left_node, args); } { uint8_t* plug = tree; size_t gap_size = node_gap_size (tree); uint8_t* gap = (plug - gap_size); args->highest_plug = tree; dprintf (3,("plug: %Ix (highest p: %Ix), free %Ix len %Id in %d", plug, args->highest_plug, (size_t)gap, gap_size, args->free_list_gen_number)); #ifdef SHORT_PLUGS if (is_plug_padded (plug)) { dprintf (3, ("%Ix padded", plug)); clear_plug_padded (plug); } #endif //SHORT_PLUGS #ifdef DOUBLY_LINKED_FL // These 2 checks should really just be merged into one. if (is_plug_bgc_mark_bit_set (plug)) { dprintf (3333, ("cbgcm: %Ix", plug)); clear_plug_bgc_mark_bit (plug); } if (is_free_obj_in_compact_bit_set (plug)) { dprintf (3333, ("cfoc: %Ix", plug)); clear_free_obj_in_compact_bit (plug); } #endif //DOUBLY_LINKED_FL #ifndef USE_REGIONS gen_crossing: { if ((args->current_gen_limit == MAX_PTR) || ((plug >= args->current_gen_limit) && ephemeral_pointer_p (plug))) { dprintf(3,(" Crossing Generation boundary at %Ix", (size_t)args->current_gen_limit)); if (!(args->current_gen_limit == MAX_PTR)) { args->free_list_gen_number--; args->free_list_gen = generation_of (args->free_list_gen_number); } dprintf(3,( " Fixing generation start of %d to: %Ix", args->free_list_gen_number, (size_t)gap)); reset_allocation_pointers (args->free_list_gen, gap); args->current_gen_limit = generation_limit (args->free_list_gen_number); if ((gap_size >= (2*Align (min_obj_size)))) { dprintf(3,(" Splitting the gap in two %Id left", gap_size)); make_unused_array (gap, Align(min_obj_size)); gap_size = (gap_size - Align(min_obj_size)); gap = (gap + Align(min_obj_size)); } else { make_unused_array (gap, gap_size); gap_size = 0; } goto gen_crossing; } } #endif //!USE_REGIONS thread_gap (gap, gap_size, args->free_list_gen); add_gen_free (args->free_list_gen->gen_num, gap_size); } if (! (0 == right_node)) { make_free_list_in_brick (tree + right_node, args); } } } } void gc_heap::thread_gap (uint8_t* gap_start, size_t size, generation* gen) { #ifndef USE_REGIONS assert (generation_allocation_start (gen)); #endif if ((size > 0)) { #ifndef USE_REGIONS assert ((heap_segment_rw (generation_start_segment (gen)) != ephemeral_heap_segment) || (gap_start > generation_allocation_start (gen))); #endif //USE_REGIONS // The beginning of a segment gap is not aligned assert (size >= Align (min_obj_size)); make_unused_array (gap_start, size, (!settings.concurrent && (gen != youngest_generation)), (gen->gen_num == max_generation)); dprintf (3, ("fr: [%Ix, %Ix[", (size_t)gap_start, (size_t)gap_start+size)); if ((size >= min_free_list)) { generation_free_list_space (gen) += size; generation_allocator (gen)->thread_item (gap_start, size); } else { generation_free_obj_space (gen) += size; } } } void gc_heap::uoh_thread_gap_front (uint8_t* gap_start, size_t size, generation* gen) { #ifndef USE_REGIONS assert (generation_allocation_start (gen)); #endif if (size >= min_free_list) { generation_free_list_space (gen) += size; generation_allocator (gen)->thread_item_front (gap_start, size); } } void gc_heap::make_unused_array (uint8_t* x, size_t size, BOOL clearp, BOOL resetp) { dprintf (3, (ThreadStressLog::gcMakeUnusedArrayMsg(), (size_t)x, (size_t)(x+size))); assert (size >= Align (min_obj_size)); //#if defined (VERIFY_HEAP) && defined (BACKGROUND_GC) // check_batch_mark_array_bits (x, x+size); //#endif //VERIFY_HEAP && BACKGROUND_GC if (resetp) { #ifdef BGC_SERVO_TUNING // Don't do this for servo tuning because it makes it even harder to regulate WS. 
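// [Editorial sketch - illustrative only, not part of the original gc.cpp source]
// A little further below, make_unused_array has to split any dead gap larger than ~4 GB into
// several free objects, because a free object records its size the way an array records its
// element count - in a 32-bit field. A minimal model that just counts how many free objects
// such a gap is carved into; header_size and min_size are stand-ins for free_object_base_size
// and Align(min_obj_size), and the chunk size is an approximation of the exact formula used
// there.
struct gc_editorial_sketch_large_free_split
{
    static size_t count_free_objects_needed (size_t size, size_t header_size, size_t min_size)
    {
        // Largest piece we carve while always leaving room for at least one more object.
        const size_t max_chunk = (size_t)UINT32_MAX - min_size;

        // The first free object keeps the low 32 bits' worth of the payload, mirroring the
        // size_as_object computation below.
        size_t first_chunk = (size_t)(uint32_t)(size - header_size) + header_size;
        size_t remaining   = size - first_chunk;
        size_t count       = 1;

        while (remaining > UINT32_MAX)
        {
            remaining -= max_chunk;
            count++;
        }
        return (remaining > 0) ? (count + 1) : count;   // the leftover gets its own free object
    }
};
// [End editorial sketch]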
if (!(bgc_tuning::enable_fl_tuning && bgc_tuning::fl_tuning_triggered)) #endif //BGC_SERVO_TUNING { reset_memory (x, size); } } ((CObjectHeader*)x)->SetFree(size); #ifdef HOST_64BIT #if BIGENDIAN #error "This won't work on big endian platforms" #endif size_t size_as_object = (uint32_t)(size - free_object_base_size) + free_object_base_size; if (size_as_object < size) { // // If the size is more than 4GB, we need to create multiple objects because of // the Array::m_NumComponents is uint32_t and the high 32 bits of unused array // size is ignored in regular object size computation. // uint8_t * tmp = x + size_as_object; size_t remaining_size = size - size_as_object; while (remaining_size > UINT32_MAX) { // Make sure that there will be at least Align(min_obj_size) left size_t current_size = UINT32_MAX - get_alignment_constant (FALSE) - Align (min_obj_size, get_alignment_constant (FALSE)); ((CObjectHeader*)tmp)->SetFree(current_size); remaining_size -= current_size; tmp += current_size; } ((CObjectHeader*)tmp)->SetFree(remaining_size); } #endif if (clearp) clear_card_for_addresses (x, x + Align(size)); } // Clear memory set by make_unused_array. void gc_heap::clear_unused_array (uint8_t* x, size_t size) { // Also clear the sync block *(((PTR_PTR)x)-1) = 0; ((CObjectHeader*)x)->UnsetFree(); #ifdef HOST_64BIT #if BIGENDIAN #error "This won't work on big endian platforms" #endif // The memory could have been cleared in the meantime. We have to mirror the algorithm // from make_unused_array since we cannot depend on the object sizes in memory. size_t size_as_object = (uint32_t)(size - free_object_base_size) + free_object_base_size; if (size_as_object < size) { uint8_t * tmp = x + size_as_object; size_t remaining_size = size - size_as_object; while (remaining_size > UINT32_MAX) { size_t current_size = UINT32_MAX - get_alignment_constant (FALSE) - Align (min_obj_size, get_alignment_constant (FALSE)); ((CObjectHeader*)tmp)->UnsetFree(); remaining_size -= current_size; tmp += current_size; } ((CObjectHeader*)tmp)->UnsetFree(); } #else UNREFERENCED_PARAMETER(size); #endif } inline uint8_t* tree_search (uint8_t* tree, uint8_t* old_address) { uint8_t* candidate = 0; int cn; while (1) { if (tree < old_address) { if ((cn = node_right_child (tree)) != 0) { assert (candidate < tree); candidate = tree; tree = tree + cn; Prefetch (tree - 8); continue; } else break; } else if (tree > old_address) { if ((cn = node_left_child (tree)) != 0) { tree = tree + cn; Prefetch (tree - 8); continue; } else break; } else break; } if (tree <= old_address) return tree; else if (candidate) return candidate; else return tree; } #ifdef FEATURE_BASICFREEZE bool gc_heap::frozen_object_p (Object* obj) { heap_segment* seg = seg_mapping_table_segment_of ((uint8_t*)obj); return heap_segment_read_only_p (seg); } #endif // FEATURE_BASICFREEZE void gc_heap::relocate_address (uint8_t** pold_address THREAD_NUMBER_DCL) { uint8_t* old_address = *pold_address; #ifdef USE_REGIONS if (!is_in_heap_range (old_address) || !should_check_brick_for_reloc (old_address)) { return; } #else //USE_REGIONS if (!((old_address >= gc_low) && (old_address < gc_high))) #ifdef MULTIPLE_HEAPS { UNREFERENCED_PARAMETER(thread); if (old_address == 0) return; gc_heap* hp = heap_of (old_address); if ((hp == this) || !((old_address >= hp->gc_low) && (old_address < hp->gc_high))) return; } #else //MULTIPLE_HEAPS return ; #endif //MULTIPLE_HEAPS #endif //USE_REGIONS // delta translates old_address into address_gc (old_address); size_t brick = brick_of (old_address); int 
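// Illustrative sketch (not part of the GC build): the brick table lookup that
// relocate_address performs. A zero entry means the brick has no plug tree (the
// caller bails out), a negative entry means "step back that many bricks" until
// a brick that owns a tree is found, and a positive entry locates the tree root
// inside the brick, stored as offset + 1. The types and helpers here are
// simplified assumptions.
#if 0
#include <cstddef>
#include <cstdint>

static uint8_t* find_plug_tree_for_address (const int16_t* brick_table,
                                            size_t brick,
                                            uint8_t* (*brick_address) (size_t))
{
    ptrdiff_t entry = brick_table[brick];
    while (entry < 0)
    {
        // Negative entries chain backwards to the brick whose tree covers
        // this address range.
        brick += entry;
        entry = brick_table[brick];
    }
    // Positive entries record the tree root as (offset into the brick) + 1.
    return brick_address (brick) + entry - 1;
}
#endif // illustrative sketch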
brick_entry = brick_table [ brick ]; uint8_t* new_address = old_address; if (! ((brick_entry == 0))) { retry: { while (brick_entry < 0) { brick = (brick + brick_entry); brick_entry = brick_table [ brick ]; } uint8_t* old_loc = old_address; uint8_t* node = tree_search ((brick_address (brick) + brick_entry-1), old_loc); if ((node <= old_loc)) new_address = (old_address + node_relocation_distance (node)); else { if (node_left_p (node)) { dprintf(3,(" L: %Ix", (size_t)node)); new_address = (old_address + (node_relocation_distance (node) + node_gap_size (node))); } else { brick = brick - 1; brick_entry = brick_table [ brick ]; goto retry; } } } dprintf (4, (ThreadStressLog::gcRelocateReferenceMsg(), pold_address, old_address, new_address)); *pold_address = new_address; return; } #ifdef FEATURE_LOH_COMPACTION if (settings.loh_compaction) { heap_segment* pSegment = seg_mapping_table_segment_of ((uint8_t*)old_address); #ifdef USE_REGIONS // pSegment could be 0 for regions, see comment for is_in_condemned. if (!pSegment) { return; } #endif //USE_REGIONS #ifdef MULTIPLE_HEAPS if (heap_segment_heap (pSegment)->loh_compacted_p) #else if (loh_compacted_p) #endif { size_t flags = pSegment->flags; if ((flags & heap_segment_flags_loh) #ifdef FEATURE_BASICFREEZE && !(flags & heap_segment_flags_readonly) #endif ) { new_address = old_address + loh_node_relocation_distance (old_address); dprintf (4, (ThreadStressLog::gcRelocateReferenceMsg(), pold_address, old_address, new_address)); *pold_address = new_address; } } } #endif //FEATURE_LOH_COMPACTION } inline void gc_heap::check_class_object_demotion (uint8_t* obj) { #ifdef COLLECTIBLE_CLASS if (is_collectible(obj)) { check_class_object_demotion_internal (obj); } #else UNREFERENCED_PARAMETER(obj); #endif //COLLECTIBLE_CLASS } #ifdef COLLECTIBLE_CLASS NOINLINE void gc_heap::check_class_object_demotion_internal (uint8_t* obj) { if (settings.demotion) { #ifdef MULTIPLE_HEAPS // We set the card without checking the demotion range 'cause at this point // the handle that points to the loader allocator object may or may not have // been relocated by other GC threads. set_card (card_of (obj)); #else THREAD_FROM_HEAP; uint8_t* class_obj = get_class_object (obj); dprintf (3, ("%Ix: got classobj %Ix", obj, class_obj)); uint8_t* temp_class_obj = class_obj; uint8_t** temp = &temp_class_obj; relocate_address (temp THREAD_NUMBER_ARG); check_demotion_helper (temp, obj); #endif //MULTIPLE_HEAPS } } #endif //COLLECTIBLE_CLASS inline void gc_heap::check_demotion_helper (uint8_t** pval, uint8_t* parent_obj) { #ifdef USE_REGIONS uint8_t* child_object = *pval; if (!is_in_heap_range (child_object)) return; int child_object_plan_gen = get_region_plan_gen_num (child_object); bool child_obj_demoted_p = is_region_demoted (child_object); if (child_obj_demoted_p) { set_card (card_of (parent_obj)); } dprintf (3, ("SC %d (%s)", child_object_plan_gen, (child_obj_demoted_p ? 
"D" : "ND"))); #else //USE_REGIONS // detect if we are demoting an object if ((*pval < demotion_high) && (*pval >= demotion_low)) { dprintf(3, ("setting card %Ix:%Ix", card_of((uint8_t*)pval), (size_t)pval)); set_card (card_of (parent_obj)); } #ifdef MULTIPLE_HEAPS else if (settings.demotion) { dprintf (4, ("Demotion active, computing heap_of object")); gc_heap* hp = heap_of (*pval); if ((*pval < hp->demotion_high) && (*pval >= hp->demotion_low)) { dprintf(3, ("setting card %Ix:%Ix", card_of((uint8_t*)pval), (size_t)pval)); set_card (card_of (parent_obj)); } } #endif //MULTIPLE_HEAPS #endif //USE_REGIONS } inline void gc_heap::reloc_survivor_helper (uint8_t** pval) { THREAD_FROM_HEAP; relocate_address (pval THREAD_NUMBER_ARG); check_demotion_helper (pval, (uint8_t*)pval); } inline void gc_heap::relocate_obj_helper (uint8_t* x, size_t s) { THREAD_FROM_HEAP; if (contain_pointers (x)) { dprintf (3, ("o$%Ix$", (size_t)x)); go_through_object_nostart (method_table(x), x, s, pval, { uint8_t* child = *pval; reloc_survivor_helper (pval); if (child) { dprintf (3, ("%Ix->%Ix->%Ix", (uint8_t*)pval, child, *pval)); } }); } check_class_object_demotion (x); } inline void gc_heap::reloc_ref_in_shortened_obj (uint8_t** address_to_set_card, uint8_t** address_to_reloc) { THREAD_FROM_HEAP; uint8_t* old_val = (address_to_reloc ? *address_to_reloc : 0); relocate_address (address_to_reloc THREAD_NUMBER_ARG); if (address_to_reloc) { dprintf (3, ("SR %Ix: %Ix->%Ix", (uint8_t*)address_to_reloc, old_val, *address_to_reloc)); } check_demotion_helper (address_to_reloc, (uint8_t*)address_to_set_card); } void gc_heap::relocate_pre_plug_info (mark* pinned_plug_entry) { THREAD_FROM_HEAP; uint8_t* plug = pinned_plug (pinned_plug_entry); uint8_t* pre_plug_start = plug - sizeof (plug_and_gap); // Note that we need to add one ptr size here otherwise we may not be able to find the relocated // address. Consider this scenario: // gen1 start | 3-ptr sized NP | PP // 0 | 0x18 | 0x30 // If we are asking for the reloc address of 0x10 we will AV in relocate_address because // the first plug we saw in the brick is 0x18 which means 0x10 will cause us to go back a brick // which is 0, and then we'll AV in tree_search when we try to do node_right_child (tree). pre_plug_start += sizeof (uint8_t*); uint8_t** old_address = &pre_plug_start; uint8_t* old_val = (old_address ? *old_address : 0); relocate_address (old_address THREAD_NUMBER_ARG); if (old_address) { dprintf (3, ("PreR %Ix: %Ix->%Ix, set reloc: %Ix", (uint8_t*)old_address, old_val, *old_address, (pre_plug_start - sizeof (uint8_t*)))); } pinned_plug_entry->set_pre_plug_info_reloc_start (pre_plug_start - sizeof (uint8_t*)); } inline void gc_heap::relocate_shortened_obj_helper (uint8_t* x, size_t s, uint8_t* end, mark* pinned_plug_entry, BOOL is_pinned) { THREAD_FROM_HEAP; uint8_t* plug = pinned_plug (pinned_plug_entry); if (!is_pinned) { //// Temporary - we just wanna make sure we are doing things right when padding is needed. 
//if ((x + s) < plug) //{ // dprintf (3, ("obj %Ix needed padding: end %Ix is %d bytes from pinned obj %Ix", // x, (x + s), (plug- (x + s)), plug)); // GCToOSInterface::DebugBreak(); //} relocate_pre_plug_info (pinned_plug_entry); } verify_pins_with_post_plug_info("after relocate_pre_plug_info"); uint8_t* saved_plug_info_start = 0; uint8_t** saved_info_to_relocate = 0; if (is_pinned) { saved_plug_info_start = (uint8_t*)(pinned_plug_entry->get_post_plug_info_start()); saved_info_to_relocate = (uint8_t**)(pinned_plug_entry->get_post_plug_reloc_info()); } else { saved_plug_info_start = (plug - sizeof (plug_and_gap)); saved_info_to_relocate = (uint8_t**)(pinned_plug_entry->get_pre_plug_reloc_info()); } uint8_t** current_saved_info_to_relocate = 0; uint8_t* child = 0; dprintf (3, ("x: %Ix, pp: %Ix, end: %Ix", x, plug, end)); if (contain_pointers (x)) { dprintf (3,("s$%Ix$", (size_t)x)); go_through_object_nostart (method_table(x), x, s, pval, { dprintf (3, ("obj %Ix, member: %Ix->%Ix", x, (uint8_t*)pval, *pval)); if ((uint8_t*)pval >= end) { current_saved_info_to_relocate = saved_info_to_relocate + ((uint8_t*)pval - saved_plug_info_start) / sizeof (uint8_t**); child = *current_saved_info_to_relocate; reloc_ref_in_shortened_obj (pval, current_saved_info_to_relocate); dprintf (3, ("last part: R-%Ix(saved: %Ix)->%Ix ->%Ix", (uint8_t*)pval, current_saved_info_to_relocate, child, *current_saved_info_to_relocate)); } else { reloc_survivor_helper (pval); } }); } check_class_object_demotion (x); } void gc_heap::relocate_survivor_helper (uint8_t* plug, uint8_t* plug_end) { uint8_t* x = plug; while (x < plug_end) { size_t s = size (x); uint8_t* next_obj = x + Align (s); Prefetch (next_obj); relocate_obj_helper (x, s); assert (s > 0); x = next_obj; } } // if we expanded, right now we are not handling it as We are not saving the new reloc info. void gc_heap::verify_pins_with_post_plug_info (const char* msg) { #if defined (_DEBUG) && defined (VERIFY_HEAP) if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) { if (!verify_pinned_queue_p) return; if (settings.heap_expansion) return; for (size_t i = 0; i < mark_stack_tos; i++) { mark& m = mark_stack_array[i]; mark* pinned_plug_entry = pinned_plug_of(i); if (pinned_plug_entry->has_post_plug_info() && pinned_plug_entry->post_short_p() && (pinned_plug_entry->saved_post_plug_debug.gap != 1)) { uint8_t* next_obj = pinned_plug_entry->get_post_plug_info_start() + sizeof (plug_and_gap); // object after pin dprintf (3, ("OFP: %Ix, G: %Ix, R: %Ix, LC: %d, RC: %d", next_obj, node_gap_size (next_obj), node_relocation_distance (next_obj), (int)node_left_child (next_obj), (int)node_right_child (next_obj))); size_t* post_plug_debug = (size_t*)(&m.saved_post_plug_debug); if (node_gap_size (next_obj) != *post_plug_debug) { dprintf (1, ("obj: %Ix gap should be %Ix but it is %Ix", next_obj, *post_plug_debug, (size_t)(node_gap_size (next_obj)))); FATAL_GC_ERROR(); } post_plug_debug++; // can't do node_relocation_distance here as it clears the left bit. 
//if (node_relocation_distance (next_obj) != *post_plug_debug) if (*((size_t*)(next_obj - 3 * sizeof (size_t))) != *post_plug_debug) { dprintf (1, ("obj: %Ix reloc should be %Ix but it is %Ix", next_obj, *post_plug_debug, (size_t)(node_relocation_distance (next_obj)))); FATAL_GC_ERROR(); } if (node_left_child (next_obj) > 0) { dprintf (1, ("obj: %Ix, vLC: %d\n", next_obj, (int)(node_left_child (next_obj)))); FATAL_GC_ERROR(); } } } dprintf (3, ("%s verified", msg)); } #else UNREFERENCED_PARAMETER(msg); #endif // _DEBUG && VERIFY_HEAP } #ifdef COLLECTIBLE_CLASS // We don't want to burn another ptr size space for pinned plugs to record this so just // set the card unconditionally for collectible objects if we are demoting. inline void gc_heap::unconditional_set_card_collectible (uint8_t* obj) { if (settings.demotion) { set_card (card_of (obj)); } } #endif //COLLECTIBLE_CLASS void gc_heap::relocate_shortened_survivor_helper (uint8_t* plug, uint8_t* plug_end, mark* pinned_plug_entry) { uint8_t* x = plug; uint8_t* p_plug = pinned_plug (pinned_plug_entry); BOOL is_pinned = (plug == p_plug); BOOL check_short_obj_p = (is_pinned ? pinned_plug_entry->post_short_p() : pinned_plug_entry->pre_short_p()); plug_end += sizeof (gap_reloc_pair); //dprintf (3, ("%s %Ix is shortened, and last object %s overwritten", (is_pinned ? "PP" : "NP"), plug, (check_short_obj_p ? "is" : "is not"))); dprintf (3, ("%s %Ix-%Ix short, LO: %s OW", (is_pinned ? "PP" : "NP"), plug, plug_end, (check_short_obj_p ? "is" : "is not"))); verify_pins_with_post_plug_info("begin reloc short surv"); while (x < plug_end) { if (check_short_obj_p && ((DWORD)(plug_end - x) < (DWORD)min_pre_pin_obj_size)) { dprintf (3, ("last obj %Ix is short", x)); if (is_pinned) { #ifdef COLLECTIBLE_CLASS if (pinned_plug_entry->post_short_collectible_p()) unconditional_set_card_collectible (x); #endif //COLLECTIBLE_CLASS // Relocate the saved references based on bits set. uint8_t** saved_plug_info_start = (uint8_t**)(pinned_plug_entry->get_post_plug_info_start()); uint8_t** saved_info_to_relocate = (uint8_t**)(pinned_plug_entry->get_post_plug_reloc_info()); for (size_t i = 0; i < pinned_plug_entry->get_max_short_bits(); i++) { if (pinned_plug_entry->post_short_bit_p (i)) { reloc_ref_in_shortened_obj ((saved_plug_info_start + i), (saved_info_to_relocate + i)); } } } else { #ifdef COLLECTIBLE_CLASS if (pinned_plug_entry->pre_short_collectible_p()) unconditional_set_card_collectible (x); #endif //COLLECTIBLE_CLASS relocate_pre_plug_info (pinned_plug_entry); // Relocate the saved references based on bits set. 
uint8_t** saved_plug_info_start = (uint8_t**)(p_plug - sizeof (plug_and_gap)); uint8_t** saved_info_to_relocate = (uint8_t**)(pinned_plug_entry->get_pre_plug_reloc_info()); for (size_t i = 0; i < pinned_plug_entry->get_max_short_bits(); i++) { if (pinned_plug_entry->pre_short_bit_p (i)) { reloc_ref_in_shortened_obj ((saved_plug_info_start + i), (saved_info_to_relocate + i)); } } } break; } size_t s = size (x); uint8_t* next_obj = x + Align (s); Prefetch (next_obj); if (next_obj >= plug_end) { dprintf (3, ("object %Ix is at the end of the plug %Ix->%Ix", next_obj, plug, plug_end)); verify_pins_with_post_plug_info("before reloc short obj"); relocate_shortened_obj_helper (x, s, (x + Align (s) - sizeof (plug_and_gap)), pinned_plug_entry, is_pinned); } else { relocate_obj_helper (x, s); } assert (s > 0); x = next_obj; } verify_pins_with_post_plug_info("end reloc short surv"); } void gc_heap::relocate_survivors_in_plug (uint8_t* plug, uint8_t* plug_end, BOOL check_last_object_p, mark* pinned_plug_entry) { dprintf (3,("RP: [%Ix(%Ix->%Ix),%Ix(%Ix->%Ix)[", (size_t)plug, brick_of (plug), (size_t)brick_table[brick_of (plug)], (size_t)plug_end, brick_of (plug_end), (size_t)brick_table[brick_of (plug_end)])); if (check_last_object_p) { relocate_shortened_survivor_helper (plug, plug_end, pinned_plug_entry); } else { relocate_survivor_helper (plug, plug_end); } } void gc_heap::relocate_survivors_in_brick (uint8_t* tree, relocate_args* args) { assert ((tree != NULL)); dprintf (3, ("tree: %Ix, args->last_plug: %Ix, left: %Ix, right: %Ix, gap(t): %Ix", tree, args->last_plug, (tree + node_left_child (tree)), (tree + node_right_child (tree)), node_gap_size (tree))); if (node_left_child (tree)) { relocate_survivors_in_brick (tree + node_left_child (tree), args); } { uint8_t* plug = tree; BOOL has_post_plug_info_p = FALSE; BOOL has_pre_plug_info_p = FALSE; if (tree == oldest_pinned_plug) { args->pinned_plug_entry = get_oldest_pinned_entry (&has_pre_plug_info_p, &has_post_plug_info_p); assert (tree == pinned_plug (args->pinned_plug_entry)); dprintf (3, ("tree is the oldest pin: %Ix", tree)); } if (args->last_plug) { size_t gap_size = node_gap_size (tree); uint8_t* gap = (plug - gap_size); dprintf (3, ("tree: %Ix, gap: %Ix (%Ix)", tree, gap, gap_size)); assert (gap_size >= Align (min_obj_size)); uint8_t* last_plug_end = gap; BOOL check_last_object_p = (args->is_shortened || has_pre_plug_info_p); { relocate_survivors_in_plug (args->last_plug, last_plug_end, check_last_object_p, args->pinned_plug_entry); } } else { assert (!has_pre_plug_info_p); } args->last_plug = plug; args->is_shortened = has_post_plug_info_p; if (has_post_plug_info_p) { dprintf (3, ("setting %Ix as shortened", plug)); } dprintf (3, ("last_plug: %Ix(shortened: %d)", plug, (args->is_shortened ? 1 : 0))); } if (node_right_child (tree)) { relocate_survivors_in_brick (tree + node_right_child (tree), args); } } inline void gc_heap::update_oldest_pinned_plug() { oldest_pinned_plug = (pinned_plug_que_empty_p() ? 0 : pinned_plug (oldest_pin())); } heap_segment* gc_heap::get_start_segment (generation* gen) { heap_segment* start_heap_segment = heap_segment_rw (generation_start_segment (gen)); #ifdef USE_REGIONS heap_segment* current_heap_segment = heap_segment_non_sip (start_heap_segment); if (current_heap_segment != start_heap_segment) { dprintf (REGIONS_LOG, ("h%d skipped gen%d SIP regions, start %Ix->%Ix", heap_number, (current_heap_segment ? 
heap_segment_gen_num (current_heap_segment) : -1), heap_segment_mem (start_heap_segment), (current_heap_segment ? heap_segment_mem (current_heap_segment) : 0))); } start_heap_segment = current_heap_segment; #endif //USE_REGIONS return start_heap_segment; } void gc_heap::relocate_survivors (int condemned_gen_number, uint8_t* first_condemned_address) { reset_pinned_queue_bos(); update_oldest_pinned_plug(); int stop_gen_idx = get_stop_generation_index (condemned_gen_number); #ifndef USE_REGIONS assert (first_condemned_address == generation_allocation_start (generation_of (condemned_gen_number))); #endif //!USE_REGIONS for (int i = condemned_gen_number; i >= stop_gen_idx; i--) { generation* condemned_gen = generation_of (i); heap_segment* current_heap_segment = heap_segment_rw (generation_start_segment (condemned_gen)); #ifdef USE_REGIONS current_heap_segment = relocate_advance_to_non_sip (current_heap_segment); if (!current_heap_segment) continue; #endif //USE_REGIONS uint8_t* start_address = get_soh_start_object (current_heap_segment, condemned_gen); size_t current_brick = brick_of (start_address); PREFIX_ASSUME(current_heap_segment != NULL); uint8_t* end_address = heap_segment_allocated (current_heap_segment); size_t end_brick = brick_of (end_address - 1); relocate_args args; args.is_shortened = FALSE; args.pinned_plug_entry = 0; args.last_plug = 0; while (1) { if (current_brick > end_brick) { if (args.last_plug) { { assert (!(args.is_shortened)); relocate_survivors_in_plug (args.last_plug, heap_segment_allocated (current_heap_segment), args.is_shortened, args.pinned_plug_entry); } args.last_plug = 0; } heap_segment* next_heap_segment = heap_segment_next (current_heap_segment); if (next_heap_segment) { #ifdef USE_REGIONS next_heap_segment = relocate_advance_to_non_sip (next_heap_segment); #endif //USE_REGIONS if (next_heap_segment) { current_heap_segment = next_heap_segment; current_brick = brick_of (heap_segment_mem (current_heap_segment)); end_brick = brick_of (heap_segment_allocated (current_heap_segment)-1); continue; } else break; } else { break; } } { int brick_entry = brick_table [ current_brick ]; if (brick_entry >= 0) { relocate_survivors_in_brick (brick_address (current_brick) + brick_entry -1, &args); } } current_brick++; } } } void gc_heap::walk_plug (uint8_t* plug, size_t size, BOOL check_last_object_p, walk_relocate_args* args) { if (check_last_object_p) { size += sizeof (gap_reloc_pair); mark* entry = args->pinned_plug_entry; if (args->is_shortened) { assert (entry->has_post_plug_info()); entry->swap_post_plug_and_saved_for_profiler(); } else { assert (entry->has_pre_plug_info()); entry->swap_pre_plug_and_saved_for_profiler(); } } ptrdiff_t last_plug_relocation = node_relocation_distance (plug); STRESS_LOG_PLUG_MOVE(plug, (plug + size), -last_plug_relocation); ptrdiff_t reloc = settings.compaction ? 
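// Illustrative sketch (not part of the GC build): the bit-mask driven fix-up
// used by relocate_shortened_survivor_helper above for a short object whose
// tail was overwritten by plug bookkeeping. Only the slots whose bit is set
// held GC references, so only the saved copies of those slots are relocated;
// the saved bytes are copied back over the plug later when the pin entry is
// recovered. Names and types here are simplified assumptions.
#if 0
#include <cstddef>
#include <cstdint>

static void relocate_saved_short_obj_slots (uint8_t** saved_slots,
                                            uint32_t   ref_bits,
                                            size_t     slot_count,
                                            void (*relocate_slot) (uint8_t**))
{
    for (size_t i = 0; i < slot_count; i++)
    {
        if (ref_bits & (1u << i))
        {
            // Slot i of the overwritten tail was a reference; patch the saved
            // copy so the restored object points at the new location.
            relocate_slot (&saved_slots[i]);
        }
    }
}
#endif // illustrative sketch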
last_plug_relocation : 0; (args->fn) (plug, (plug + size), reloc, args->profiling_context, !!settings.compaction, false); if (check_last_object_p) { mark* entry = args->pinned_plug_entry; if (args->is_shortened) { entry->swap_post_plug_and_saved_for_profiler(); } else { entry->swap_pre_plug_and_saved_for_profiler(); } } } void gc_heap::walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args) { assert ((tree != NULL)); if (node_left_child (tree)) { walk_relocation_in_brick (tree + node_left_child (tree), args); } uint8_t* plug = tree; BOOL has_pre_plug_info_p = FALSE; BOOL has_post_plug_info_p = FALSE; if (tree == oldest_pinned_plug) { args->pinned_plug_entry = get_oldest_pinned_entry (&has_pre_plug_info_p, &has_post_plug_info_p); assert (tree == pinned_plug (args->pinned_plug_entry)); } if (args->last_plug != 0) { size_t gap_size = node_gap_size (tree); uint8_t* gap = (plug - gap_size); uint8_t* last_plug_end = gap; size_t last_plug_size = (last_plug_end - args->last_plug); dprintf (3, ("tree: %Ix, last_plug: %Ix, gap: %Ix(%Ix), last_plug_end: %Ix, size: %Ix", tree, args->last_plug, gap, gap_size, last_plug_end, last_plug_size)); BOOL check_last_object_p = (args->is_shortened || has_pre_plug_info_p); if (!check_last_object_p) { assert (last_plug_size >= Align (min_obj_size)); } walk_plug (args->last_plug, last_plug_size, check_last_object_p, args); } else { assert (!has_pre_plug_info_p); } dprintf (3, ("set args last plug to plug: %Ix", plug)); args->last_plug = plug; args->is_shortened = has_post_plug_info_p; if (node_right_child (tree)) { walk_relocation_in_brick (tree + node_right_child (tree), args); } } void gc_heap::walk_relocation (void* profiling_context, record_surv_fn fn) { int condemned_gen_number = settings.condemned_generation; int stop_gen_idx = get_stop_generation_index (condemned_gen_number); for (int i = condemned_gen_number; i >= stop_gen_idx; i--) { generation* condemned_gen = generation_of (i); heap_segment* current_heap_segment = heap_segment_rw (generation_start_segment (condemned_gen)); uint8_t* start_address = get_soh_start_object (current_heap_segment, condemned_gen); size_t current_brick = brick_of (start_address); PREFIX_ASSUME(current_heap_segment != NULL); reset_pinned_queue_bos(); update_oldest_pinned_plug(); size_t end_brick = brick_of (heap_segment_allocated (current_heap_segment)-1); walk_relocate_args args; args.is_shortened = FALSE; args.pinned_plug_entry = 0; args.last_plug = 0; args.profiling_context = profiling_context; args.fn = fn; while (1) { if (current_brick > end_brick) { if (args.last_plug) { walk_plug (args.last_plug, (heap_segment_allocated (current_heap_segment) - args.last_plug), args.is_shortened, &args); args.last_plug = 0; } if (heap_segment_next_rw (current_heap_segment)) { current_heap_segment = heap_segment_next_rw (current_heap_segment); current_brick = brick_of (heap_segment_mem (current_heap_segment)); end_brick = brick_of (heap_segment_allocated (current_heap_segment)-1); continue; } else { break; } } { int brick_entry = brick_table [ current_brick ]; if (brick_entry >= 0) { walk_relocation_in_brick (brick_address (current_brick) + brick_entry - 1, &args); } } current_brick++; } } } void gc_heap::walk_survivors (record_surv_fn fn, void* context, walk_surv_type type) { if (type == walk_for_gc) walk_survivors_relocation (context, fn); #if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE) else if (type == walk_for_bgc) walk_survivors_for_bgc (context, fn); #endif //BACKGROUND_GC && FEATURE_EVENT_TRACE else assert 
(!"unknown type!"); } #if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE) void gc_heap::walk_survivors_for_bgc (void* profiling_context, record_surv_fn fn) { assert(settings.concurrent); for (int i = get_start_generation_index(); i < total_generation_count; i++) { int align_const = get_alignment_constant (i == max_generation); heap_segment* seg = heap_segment_rw (generation_start_segment (generation_of (i))); while (seg) { uint8_t* o = heap_segment_mem (seg); uint8_t* end = heap_segment_allocated (seg); while (o < end) { if (method_table(o) == g_gc_pFreeObjectMethodTable) { o += Align (size (o), align_const); continue; } // It's survived. Make a fake plug, starting at o, // and send the event uint8_t* plug_start = o; while (method_table(o) != g_gc_pFreeObjectMethodTable) { o += Align (size (o), align_const); if (o >= end) { break; } } uint8_t* plug_end = o; fn (plug_start, plug_end, 0, // Reloc distance == 0 as this is non-compacting profiling_context, false, // Non-compacting true); // BGC } seg = heap_segment_next (seg); } } } #endif //BACKGROUND_GC && FEATURE_EVENT_TRACE void gc_heap::relocate_phase (int condemned_gen_number, uint8_t* first_condemned_address) { ScanContext sc; sc.thread_number = heap_number; sc.promotion = FALSE; sc.concurrent = FALSE; #ifdef MULTIPLE_HEAPS //join all threads to make sure they are synchronized dprintf(3, ("Joining after end of plan")); gc_t_join.join(this, gc_join_begin_relocate_phase); if (gc_t_join.joined()) { #endif //MULTIPLE_HEAPS #ifdef FEATURE_EVENT_TRACE if (informational_event_enabled_p) { gc_time_info[time_relocate] = GetHighPrecisionTimeStamp(); } #endif //FEATURE_EVENT_TRACE #ifdef MULTIPLE_HEAPS //join all threads to make sure they are synchronized dprintf(3, ("Restarting for relocation")); gc_t_join.restart(); } #endif //MULTIPLE_HEAPS dprintf (2, (ThreadStressLog::gcStartRelocateMsg(), heap_number)); dprintf(3,("Relocating roots")); GCScan::GcScanRoots(GCHeap::Relocate, condemned_gen_number, max_generation, &sc); verify_pins_with_post_plug_info("after reloc stack"); #ifdef BACKGROUND_GC if (gc_heap::background_running_p()) { scan_background_roots (GCHeap::Relocate, heap_number, &sc); } #endif //BACKGROUND_GC #ifdef FEATURE_CARD_MARKING_STEALING // for card marking stealing, do the other relocations *before* we scan the older generations // this gives us a chance to make up for imbalance in these phases later { dprintf(3, ("Relocating survivors")); relocate_survivors(condemned_gen_number, first_condemned_address); } #ifdef FEATURE_PREMORTEM_FINALIZATION dprintf(3, ("Relocating finalization data")); finalize_queue->RelocateFinalizationData(condemned_gen_number, __this); #endif // FEATURE_PREMORTEM_FINALIZATION { dprintf(3, ("Relocating handle table")); GCScan::GcScanHandles(GCHeap::Relocate, condemned_gen_number, max_generation, &sc); } #endif // FEATURE_CARD_MARKING_STEALING if (condemned_gen_number != max_generation) { #if defined(MULTIPLE_HEAPS) && defined(FEATURE_CARD_MARKING_STEALING) if (!card_mark_done_soh) #endif // MULTIPLE_HEAPS && FEATURE_CARD_MARKING_STEALING { dprintf (3, ("Relocating cross generation pointers on heap %d", heap_number)); mark_through_cards_for_segments(&gc_heap::relocate_address, TRUE THIS_ARG); verify_pins_with_post_plug_info("after reloc cards"); #if defined(MULTIPLE_HEAPS) && defined(FEATURE_CARD_MARKING_STEALING) card_mark_done_soh = true; #endif // MULTIPLE_HEAPS && FEATURE_CARD_MARKING_STEALING } } if (condemned_gen_number != max_generation) { #if defined(MULTIPLE_HEAPS) && 
defined(FEATURE_CARD_MARKING_STEALING) if (!card_mark_done_uoh) #endif // MULTIPLE_HEAPS && FEATURE_CARD_MARKING_STEALING { dprintf (3, ("Relocating cross generation pointers for uoh objects on heap %d", heap_number)); for (int i = uoh_start_generation; i < total_generation_count; i++) { #ifndef ALLOW_REFERENCES_IN_POH if (i != poh_generation) #endif //ALLOW_REFERENCES_IN_POH mark_through_cards_for_uoh_objects(&gc_heap::relocate_address, i, TRUE THIS_ARG); } #if defined(MULTIPLE_HEAPS) && defined(FEATURE_CARD_MARKING_STEALING) card_mark_done_uoh = true; #endif // MULTIPLE_HEAPS && FEATURE_CARD_MARKING_STEALING } } else { #ifdef FEATURE_LOH_COMPACTION if (loh_compacted_p) { assert (settings.condemned_generation == max_generation); relocate_in_loh_compact(); } else #endif //FEATURE_LOH_COMPACTION { relocate_in_uoh_objects (loh_generation); } #ifdef ALLOW_REFERENCES_IN_POH relocate_in_uoh_objects (poh_generation); #endif } #ifndef FEATURE_CARD_MARKING_STEALING // moved this code *before* we scan the older generations via mark_through_cards_xxx // this gives us a chance to have mark_through_cards_xxx make up for imbalance in the other relocations { dprintf(3,("Relocating survivors")); relocate_survivors (condemned_gen_number, first_condemned_address); } #ifdef FEATURE_PREMORTEM_FINALIZATION dprintf(3,("Relocating finalization data")); finalize_queue->RelocateFinalizationData (condemned_gen_number, __this); #endif // FEATURE_PREMORTEM_FINALIZATION { dprintf(3,("Relocating handle table")); GCScan::GcScanHandles(GCHeap::Relocate, condemned_gen_number, max_generation, &sc); } #endif // !FEATURE_CARD_MARKING_STEALING #if defined(MULTIPLE_HEAPS) && defined(FEATURE_CARD_MARKING_STEALING) if (condemned_gen_number != max_generation) { // check the other heaps cyclically and try to help out where the relocation isn't done for (int i = 0; i < gc_heap::n_heaps; i++) { int heap_number_to_look_at = (i + heap_number) % gc_heap::n_heaps; gc_heap* hp = gc_heap::g_heaps[heap_number_to_look_at]; if (!hp->card_mark_done_soh) { dprintf(3, ("Relocating cross generation pointers on heap %d", hp->heap_number)); hp->mark_through_cards_for_segments(&gc_heap::relocate_address, TRUE THIS_ARG); hp->card_mark_done_soh = true; } if (!hp->card_mark_done_uoh) { dprintf(3, ("Relocating cross generation pointers for uoh objects on heap %d", hp->heap_number)); for (int i = uoh_start_generation; i < total_generation_count; i++) { #ifndef ALLOW_REFERENCES_IN_POH if (i != poh_generation) #endif //ALLOW_REFERENCES_IN_POH hp->mark_through_cards_for_uoh_objects(&gc_heap::relocate_address, i, TRUE THIS_ARG); } hp->card_mark_done_uoh = true; } } } #endif // MULTIPLE_HEAPS && FEATURE_CARD_MARKING_STEALING dprintf(2, (ThreadStressLog::gcEndRelocateMsg(), heap_number)); } // This compares to see if tree is the current pinned plug and returns info // for this pinned plug. Also advances the pinned queue if that's the case. // // We don't change the values of the plug info if tree is not the same as // the current pinned plug - the caller is responsible for setting the right // values to begin with. // // POPO TODO: We are keeping this temporarily as this is also used by realloc // where it passes FALSE to deque_p, change it to use the same optimization // as relocate. Not as essential since realloc is already a slow path. 
mark* gc_heap::get_next_pinned_entry (uint8_t* tree, BOOL* has_pre_plug_info_p, BOOL* has_post_plug_info_p, BOOL deque_p) { if (!pinned_plug_que_empty_p()) { mark* oldest_entry = oldest_pin(); uint8_t* oldest_plug = pinned_plug (oldest_entry); if (tree == oldest_plug) { *has_pre_plug_info_p = oldest_entry->has_pre_plug_info(); *has_post_plug_info_p = oldest_entry->has_post_plug_info(); if (deque_p) { deque_pinned_plug(); } dprintf (3, ("found a pinned plug %Ix, pre: %d, post: %d", tree, (*has_pre_plug_info_p ? 1 : 0), (*has_post_plug_info_p ? 1 : 0))); return oldest_entry; } } return NULL; } // This also deques the oldest entry and update the oldest plug mark* gc_heap::get_oldest_pinned_entry (BOOL* has_pre_plug_info_p, BOOL* has_post_plug_info_p) { mark* oldest_entry = oldest_pin(); *has_pre_plug_info_p = oldest_entry->has_pre_plug_info(); *has_post_plug_info_p = oldest_entry->has_post_plug_info(); deque_pinned_plug(); update_oldest_pinned_plug(); return oldest_entry; } inline void gc_heap::copy_cards_range (uint8_t* dest, uint8_t* src, size_t len, BOOL copy_cards_p) { if (copy_cards_p) copy_cards_for_addresses (dest, src, len); else clear_card_for_addresses (dest, dest + len); } // POPO TODO: We should actually just recover the artificially made gaps here..because when we copy // we always copy the earlier plugs first which means we won't need the gap sizes anymore. This way // we won't need to individually recover each overwritten part of plugs. inline void gc_heap::gcmemcopy (uint8_t* dest, uint8_t* src, size_t len, BOOL copy_cards_p) { if (dest != src) { #ifdef BACKGROUND_GC if (current_c_gc_state == c_gc_state_marking) { //TODO: should look to see whether we should consider changing this // to copy a consecutive region of the mark array instead. 
copy_mark_bits_for_addresses (dest, src, len); } #endif //BACKGROUND_GC #ifdef DOUBLY_LINKED_FL BOOL set_bgc_mark_bits_p = is_plug_bgc_mark_bit_set (src); if (set_bgc_mark_bits_p) { clear_plug_bgc_mark_bit (src); } BOOL make_free_obj_p = FALSE; if (len <= min_free_item_no_prev) { make_free_obj_p = is_free_obj_in_compact_bit_set (src); if (make_free_obj_p) { clear_free_obj_in_compact_bit (src); } } #endif //DOUBLY_LINKED_FL //dprintf(3,(" Memcopy [%Ix->%Ix, %Ix->%Ix[", (size_t)src, (size_t)dest, (size_t)src+len, (size_t)dest+len)); dprintf(3,(ThreadStressLog::gcMemCopyMsg(), (size_t)src, (size_t)dest, (size_t)src+len, (size_t)dest+len)); memcopy (dest - plug_skew, src - plug_skew, len); #ifdef DOUBLY_LINKED_FL if (set_bgc_mark_bits_p) { uint8_t* dest_o = dest; uint8_t* dest_end_o = dest + len; while (dest_o < dest_end_o) { uint8_t* next_o = dest_o + Align (size (dest_o)); background_mark (dest_o, background_saved_lowest_address, background_saved_highest_address); dest_o = next_o; } dprintf (3333, ("[h%d] GM: %Ix(%Ix-%Ix)->%Ix(%Ix-%Ix)", heap_number, dest, (size_t)(&mark_array [mark_word_of (dest)]), (size_t)(mark_array [mark_word_of (dest)]), dest_end_o, (size_t)(&mark_array [mark_word_of (dest_o)]), (size_t)(mark_array [mark_word_of (dest_o)]))); } if (make_free_obj_p) { size_t* filler_free_obj_size_location = (size_t*)(dest + min_free_item_no_prev); size_t filler_free_obj_size = *filler_free_obj_size_location; make_unused_array ((dest + len), filler_free_obj_size); dprintf (3333, ("[h%d] smallobj, %Ix(%Id): %Ix->%Ix", heap_number, filler_free_obj_size_location, filler_free_obj_size, (dest + len), (dest + len + filler_free_obj_size))); } #endif //DOUBLY_LINKED_FL #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP if (SoftwareWriteWatch::IsEnabledForGCHeap()) { // The ranges [src - plug_kew .. src[ and [src + len - plug_skew .. src + len[ are ObjHeaders, which don't have GC // references, and are not relevant for write watch. The latter range actually corresponds to the ObjHeader for the // object at (src + len), so it can be ignored anyway. SoftwareWriteWatch::SetDirtyRegion(dest, len - plug_skew); } #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP copy_cards_range (dest, src, len, copy_cards_p); } } void gc_heap::compact_plug (uint8_t* plug, size_t size, BOOL check_last_object_p, compact_args* args) { args->print(); uint8_t* reloc_plug = plug + args->last_plug_relocation; if (check_last_object_p) { size += sizeof (gap_reloc_pair); mark* entry = args->pinned_plug_entry; if (args->is_shortened) { assert (entry->has_post_plug_info()); entry->swap_post_plug_and_saved(); } else { assert (entry->has_pre_plug_info()); entry->swap_pre_plug_and_saved(); } } int old_brick_entry = brick_table [brick_of (plug)]; assert (node_relocation_distance (plug) == args->last_plug_relocation); #ifdef FEATURE_STRUCTALIGN ptrdiff_t alignpad = node_alignpad(plug); if (alignpad) { make_unused_array (reloc_plug - alignpad, alignpad); if (brick_of (reloc_plug - alignpad) != brick_of (reloc_plug)) { // The alignment padding is straddling one or more bricks; // it has to be the last "object" of its first brick. 
fix_brick_to_highest (reloc_plug - alignpad, reloc_plug); } } #else // FEATURE_STRUCTALIGN size_t unused_arr_size = 0; BOOL already_padded_p = FALSE; #ifdef SHORT_PLUGS if (is_plug_padded (plug)) { already_padded_p = TRUE; clear_plug_padded (plug); unused_arr_size = Align (min_obj_size); } #endif //SHORT_PLUGS if (node_realigned (plug)) { unused_arr_size += switch_alignment_size (already_padded_p); } if (unused_arr_size != 0) { make_unused_array (reloc_plug - unused_arr_size, unused_arr_size); if (brick_of (reloc_plug - unused_arr_size) != brick_of (reloc_plug)) { dprintf (3, ("fix B for padding: %Id: %Ix->%Ix", unused_arr_size, (reloc_plug - unused_arr_size), reloc_plug)); // The alignment padding is straddling one or more bricks; // it has to be the last "object" of its first brick. fix_brick_to_highest (reloc_plug - unused_arr_size, reloc_plug); } } #endif // FEATURE_STRUCTALIGN #ifdef SHORT_PLUGS if (is_plug_padded (plug)) { make_unused_array (reloc_plug - Align (min_obj_size), Align (min_obj_size)); if (brick_of (reloc_plug - Align (min_obj_size)) != brick_of (reloc_plug)) { // The alignment padding is straddling one or more bricks; // it has to be the last "object" of its first brick. fix_brick_to_highest (reloc_plug - Align (min_obj_size), reloc_plug); } } #endif //SHORT_PLUGS gcmemcopy (reloc_plug, plug, size, args->copy_cards_p); if (args->check_gennum_p) { int src_gennum = args->src_gennum; if (src_gennum == -1) { src_gennum = object_gennum (plug); } int dest_gennum = object_gennum_plan (reloc_plug); if (src_gennum < dest_gennum) { generation_allocation_size (generation_of (dest_gennum)) += size; } } size_t current_reloc_brick = args->current_compacted_brick; if (brick_of (reloc_plug) != current_reloc_brick) { dprintf (3, ("last reloc B: %Ix, current reloc B: %Ix", current_reloc_brick, brick_of (reloc_plug))); if (args->before_last_plug) { dprintf (3,(" fixing last brick %Ix to point to last plug %Ix(%Ix)", current_reloc_brick, args->before_last_plug, (args->before_last_plug - brick_address (current_reloc_brick)))); { set_brick (current_reloc_brick, args->before_last_plug - brick_address (current_reloc_brick)); } } current_reloc_brick = brick_of (reloc_plug); } size_t end_brick = brick_of (reloc_plug + size-1); if (end_brick != current_reloc_brick) { // The plug is straddling one or more bricks // It has to be the last plug of its first brick dprintf (3,("plug spanning multiple bricks, fixing first brick %Ix to %Ix(%Ix)", current_reloc_brick, (size_t)reloc_plug, (reloc_plug - brick_address (current_reloc_brick)))); { set_brick (current_reloc_brick, reloc_plug - brick_address (current_reloc_brick)); } // update all intervening brick size_t brick = current_reloc_brick + 1; dprintf (3,("setting intervening bricks %Ix->%Ix to -1", brick, (end_brick - 1))); while (brick < end_brick) { set_brick (brick, -1); brick++; } // code last brick offset as a plug address args->before_last_plug = brick_address (end_brick) -1; current_reloc_brick = end_brick; dprintf (3, ("setting before last to %Ix, last brick to %Ix", args->before_last_plug, current_reloc_brick)); } else { dprintf (3, ("still in the same brick: %Ix", end_brick)); args->before_last_plug = reloc_plug; } args->current_compacted_brick = current_reloc_brick; if (check_last_object_p) { mark* entry = args->pinned_plug_entry; if (args->is_shortened) { entry->swap_post_plug_and_saved(); } else { entry->swap_pre_plug_and_saved(); } } } void gc_heap::compact_in_brick (uint8_t* tree, compact_args* args) { assert (tree != NULL); int 
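// Illustrative sketch (not part of the GC build): the brick table maintenance
// done by compact_plug above when a relocated plug spans several bricks. The
// brick the plug starts in records the plug's offset, every brick it merely
// runs through is set to -1 ("look one brick back"), and the brick it ends in
// is remembered so it can be fixed up once the next plug or the segment end is
// known. The set_brick callback stands in for the real helper, which biases
// non-negative offsets by +1 when storing them.
#if 0
#include <cstddef>
#include <cstdint>

static size_t fix_bricks_for_straddling_plug (uint8_t* reloc_plug, size_t size,
                                              size_t (*brick_of) (uint8_t*),
                                              uint8_t* (*brick_address) (size_t),
                                              void (*set_brick) (size_t, ptrdiff_t))
{
    size_t first_brick = brick_of (reloc_plug);
    size_t end_brick   = brick_of (reloc_plug + size - 1);

    // The first brick records where the plug starts within it.
    set_brick (first_brick, reloc_plug - brick_address (first_brick));

    // Intervening bricks defer to the previous brick's tree.
    for (size_t b = first_brick + 1; b < end_brick; b++)
        set_brick (b, -1);

    // The caller fixes up end_brick once it knows the last plug landing there.
    return end_brick;
}
#endif // illustrative sketch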
left_node = node_left_child (tree); int right_node = node_right_child (tree); ptrdiff_t relocation = node_relocation_distance (tree); args->print(); if (left_node) { dprintf (3, ("B: L: %d->%Ix", left_node, (tree + left_node))); compact_in_brick ((tree + left_node), args); } uint8_t* plug = tree; BOOL has_pre_plug_info_p = FALSE; BOOL has_post_plug_info_p = FALSE; if (tree == oldest_pinned_plug) { args->pinned_plug_entry = get_oldest_pinned_entry (&has_pre_plug_info_p, &has_post_plug_info_p); assert (tree == pinned_plug (args->pinned_plug_entry)); } if (args->last_plug != 0) { size_t gap_size = node_gap_size (tree); uint8_t* gap = (plug - gap_size); uint8_t* last_plug_end = gap; size_t last_plug_size = (last_plug_end - args->last_plug); assert ((last_plug_size & (sizeof(PTR_PTR) - 1)) == 0); dprintf (3, ("tree: %Ix, last_plug: %Ix, gap: %Ix(%Ix), last_plug_end: %Ix, size: %Ix", tree, args->last_plug, gap, gap_size, last_plug_end, last_plug_size)); BOOL check_last_object_p = (args->is_shortened || has_pre_plug_info_p); if (!check_last_object_p) { assert (last_plug_size >= Align (min_obj_size)); } compact_plug (args->last_plug, last_plug_size, check_last_object_p, args); } else { assert (!has_pre_plug_info_p); } dprintf (3, ("set args last plug to plug: %Ix, reloc: %Ix", plug, relocation)); args->last_plug = plug; args->last_plug_relocation = relocation; args->is_shortened = has_post_plug_info_p; if (right_node) { dprintf (3, ("B: R: %d->%Ix", right_node, (tree + right_node))); compact_in_brick ((tree + right_node), args); } } // This returns the recovered size for gen2 plugs as that's what we need // mostly - would be nice to make it work for all generations. size_t gc_heap::recover_saved_pinned_info() { reset_pinned_queue_bos(); size_t total_recovered_sweep_size = 0; while (!(pinned_plug_que_empty_p())) { mark* oldest_entry = oldest_pin(); size_t recovered_sweep_size = oldest_entry->recover_plug_info(); if (recovered_sweep_size > 0) { uint8_t* plug = pinned_plug (oldest_entry); if (object_gennum (plug) == max_generation) { dprintf (3, ("recovered %Ix(%Id) from pin", plug, recovered_sweep_size)); total_recovered_sweep_size += recovered_sweep_size; } } #ifdef GC_CONFIG_DRIVEN if (oldest_entry->has_pre_plug_info() && oldest_entry->has_post_plug_info()) record_interesting_data_point (idp_pre_and_post_pin); else if (oldest_entry->has_pre_plug_info()) record_interesting_data_point (idp_pre_pin); else if (oldest_entry->has_post_plug_info()) record_interesting_data_point (idp_post_pin); #endif //GC_CONFIG_DRIVEN deque_pinned_plug(); } return total_recovered_sweep_size; } void gc_heap::compact_phase (int condemned_gen_number, uint8_t* first_condemned_address, BOOL clear_cards) { #ifdef MULTIPLE_HEAPS dprintf(3, ("Joining after end of relocation")); gc_t_join.join(this, gc_join_relocate_phase_done); if (gc_t_join.joined()) #endif //MULTIPLE_HEAPS { #ifdef FEATURE_EVENT_TRACE if (informational_event_enabled_p) { gc_time_info[time_compact] = GetHighPrecisionTimeStamp(); gc_time_info[time_relocate] = gc_time_info[time_compact] - gc_time_info[time_relocate]; } #endif //FEATURE_EVENT_TRACE #ifdef MULTIPLE_HEAPS dprintf(3, ("Restarting for compaction")); gc_t_join.restart(); #endif //MULTIPLE_HEAPS } dprintf (2, (ThreadStressLog::gcStartCompactMsg(), heap_number, first_condemned_address, brick_of (first_condemned_address))); #ifdef FEATURE_LOH_COMPACTION if (loh_compacted_p) { compact_loh(); } #endif //FEATURE_LOH_COMPACTION reset_pinned_queue_bos(); update_oldest_pinned_plug(); BOOL reused_seg = 
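// Illustrative sketch (not part of the GC build): the post-compaction pass that
// recover_saved_pinned_info above performs - every entry left in the pin queue
// gets its saved pre/post-plug bytes restored, and the recovered sizes of the
// plugs that live in gen2 are totalled (the recovered sweep size). The
// callbacks below are simplified assumptions standing in for the real
// mark-entry helpers.
#if 0
#include <cstddef>

static size_t recover_all_pinned_plug_info (size_t pin_count,
                                            size_t (*recover_entry) (size_t index),
                                            bool (*entry_in_gen2) (size_t index))
{
    size_t total_recovered_gen2 = 0;
    for (size_t i = 0; i < pin_count; i++)
    {
        size_t recovered = recover_entry (i);   // restores the overwritten bytes
        if ((recovered > 0) && entry_in_gen2 (i))
            total_recovered_gen2 += recovered;
    }
    return total_recovered_gen2;
}
#endif // illustrative sketch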
expand_reused_seg_p(); if (reused_seg) { for (int i = 1; i <= max_generation; i++) { generation_allocation_size (generation_of (i)) = 0; } } int stop_gen_idx = get_stop_generation_index (condemned_gen_number); for (int i = condemned_gen_number; i >= stop_gen_idx; i--) { generation* condemned_gen = generation_of (i); heap_segment* current_heap_segment = get_start_segment (condemned_gen); #ifdef USE_REGIONS if (!current_heap_segment) continue; size_t current_brick = brick_of (heap_segment_mem (current_heap_segment)); #else size_t current_brick = brick_of (first_condemned_address); #endif //USE_REGIONS uint8_t* end_address = heap_segment_allocated (current_heap_segment); #ifndef USE_REGIONS if ((first_condemned_address >= end_address) && (condemned_gen_number < max_generation)) { return; } #endif //!USE_REGIONS size_t end_brick = brick_of (end_address-1); compact_args args; args.last_plug = 0; args.before_last_plug = 0; args.current_compacted_brick = ~((size_t)1); args.is_shortened = FALSE; args.pinned_plug_entry = 0; args.copy_cards_p = (condemned_gen_number >= 1) || !clear_cards; args.check_gennum_p = reused_seg; if (args.check_gennum_p) { args.src_gennum = ((current_heap_segment == ephemeral_heap_segment) ? -1 : 2); } #ifdef USE_REGIONS assert (!args.check_gennum_p); #endif //USE_REGIONS while (1) { if (current_brick > end_brick) { if (args.last_plug != 0) { dprintf (3, ("compacting last plug: %Ix", args.last_plug)) compact_plug (args.last_plug, (heap_segment_allocated (current_heap_segment) - args.last_plug), args.is_shortened, &args); } heap_segment* next_heap_segment = heap_segment_next_non_sip (current_heap_segment); if (next_heap_segment) { current_heap_segment = next_heap_segment; current_brick = brick_of (heap_segment_mem (current_heap_segment)); end_brick = brick_of (heap_segment_allocated (current_heap_segment)-1); args.last_plug = 0; if (args.check_gennum_p) { args.src_gennum = ((current_heap_segment == ephemeral_heap_segment) ? -1 : 2); } continue; } else { if (args.before_last_plug !=0) { dprintf (3, ("Fixing last brick %Ix to point to plug %Ix", args.current_compacted_brick, (size_t)args.before_last_plug)); assert (args.current_compacted_brick != ~1u); set_brick (args.current_compacted_brick, args.before_last_plug - brick_address (args.current_compacted_brick)); } break; } } { int brick_entry = brick_table [ current_brick ]; dprintf (3, ("B: %Ix(%Ix)->%Ix", current_brick, (size_t)brick_entry, (brick_address (current_brick) + brick_entry - 1))); if (brick_entry >= 0) { compact_in_brick ((brick_address (current_brick) + brick_entry -1), &args); } } current_brick++; } } recover_saved_pinned_info(); concurrent_print_time_delta ("compact end"); dprintf (2, (ThreadStressLog::gcEndCompactMsg(), heap_number)); } #ifdef MULTIPLE_HEAPS #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable:4702) // C4702: unreachable code: gc_thread_function may not return #endif //_MSC_VER void gc_heap::gc_thread_stub (void* arg) { gc_heap* heap = (gc_heap*)arg; if (!gc_thread_no_affinitize_p) { // We are about to set affinity for GC threads. It is a good place to set up NUMA and // CPU groups because the process mask, processor number, and group number are all // readily available. set_thread_affinity_for_heap (heap->heap_number, heap_select::find_proc_no_from_heap_no (heap->heap_number)); } // server GC threads run at a higher priority than normal. 
GCToOSInterface::BoostThreadPriority(); void* tmp = _alloca (256*heap->heap_number); heap->gc_thread_function(); } #ifdef _MSC_VER #pragma warning(pop) #endif //_MSC_VER #endif //MULTIPLE_HEAPS #ifdef BACKGROUND_GC #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable:4702) // C4702: unreachable code: gc_thread_function may not return #endif //_MSC_VER void gc_heap::bgc_thread_stub (void* arg) { gc_heap* heap = (gc_heap*)arg; heap->bgc_thread = GCToEEInterface::GetThread(); assert(heap->bgc_thread != nullptr); heap->bgc_thread_function(); } #ifdef _MSC_VER #pragma warning(pop) #endif //_MSC_VER void gc_heap::background_drain_mark_list (int thread) { #ifndef MULTIPLE_HEAPS UNREFERENCED_PARAMETER(thread); #endif //!MULTIPLE_HEAPS size_t saved_c_mark_list_index = c_mark_list_index; if (saved_c_mark_list_index) { concurrent_print_time_delta ("SML"); } while (c_mark_list_index != 0) { size_t current_index = c_mark_list_index - 1; uint8_t* o = c_mark_list [current_index]; background_mark_object (o THREAD_NUMBER_ARG); c_mark_list_index--; } if (saved_c_mark_list_index) { concurrent_print_time_delta ("EML"); } fire_drain_mark_list_event (saved_c_mark_list_index); } // The background GC version of scan_dependent_handles (see that method for a more in-depth comment). #ifdef MULTIPLE_HEAPS // Since we only scan dependent handles while we are stopped we'll never interfere with FGCs scanning // them. So we can use the same static variables. void gc_heap::background_scan_dependent_handles (ScanContext *sc) { // Whenever we call this method there may have been preceding object promotions. So set // s_fUnscannedPromotions unconditionally (during further iterations of the scanning loop this will be set // based on the how the scanning proceeded). s_fUnscannedPromotions = TRUE; // We don't know how many times we need to loop yet. In particular we can't base the loop condition on // the state of this thread's portion of the dependent handle table. That's because promotions on other // threads could cause handle promotions to become necessary here. Even if there are definitely no more // promotions possible in this thread's handles, we still have to stay in lock-step with those worker // threads that haven't finished yet (each GC worker thread has to join exactly the same number of times // as all the others or they'll get out of step). while (true) { // The various worker threads are all currently racing in this code. We need to work out if at least // one of them think they have work to do this cycle. Each thread needs to rescan its portion of the // dependent handle table when both of the following conditions apply: // 1) At least one (arbitrary) object might have been promoted since the last scan (because if this // object happens to correspond to a primary in one of our handles we might potentially have to // promote the associated secondary). // 2) The table for this thread has at least one handle with a secondary that isn't promoted yet. // // The first condition is represented by s_fUnscannedPromotions. This is always non-zero for the first // iteration of this loop (see comment above) and in subsequent cycles each thread updates this // whenever a mark stack overflow occurs or scanning their dependent handles results in a secondary // being promoted. This value is cleared back to zero in a synchronized fashion in the join that // follows below. 
Note that we can't read this outside of the join since on any iteration apart from // the first threads will be racing between reading this value and completing their previous // iteration's table scan. // // The second condition is tracked by the dependent handle code itself on a per worker thread basis // (and updated by the GcDhReScan() method). We call GcDhUnpromotedHandlesExist() on each thread to // determine the local value and collect the results into the s_fUnpromotedHandles variable in what is // effectively an OR operation. As per s_fUnscannedPromotions we can't read the final result until // we're safely joined. if (GCScan::GcDhUnpromotedHandlesExist(sc)) s_fUnpromotedHandles = TRUE; // Synchronize all the threads so we can read our state variables safely. The following shared // variable (indicating whether we should scan the tables or terminate the loop) will be set by a // single thread inside the join. bgc_t_join.join(this, gc_join_scan_dependent_handles); if (bgc_t_join.joined()) { // We're synchronized so it's safe to read our shared state variables. We update another shared // variable to indicate to all threads whether we'll be scanning for another cycle or terminating // the loop. We scan if there has been at least one object promotion since last time and at least // one thread has a dependent handle table with a potential handle promotion possible. s_fScanRequired = s_fUnscannedPromotions && s_fUnpromotedHandles; // Reset our shared state variables (ready to be set again on this scan or with a good initial // value for the next call if we're terminating the loop). s_fUnscannedPromotions = FALSE; s_fUnpromotedHandles = FALSE; if (!s_fScanRequired) { #ifndef USE_REGIONS uint8_t* all_heaps_max = 0; uint8_t* all_heaps_min = MAX_PTR; int i; for (i = 0; i < n_heaps; i++) { if (all_heaps_max < g_heaps[i]->background_max_overflow_address) all_heaps_max = g_heaps[i]->background_max_overflow_address; if (all_heaps_min > g_heaps[i]->background_min_overflow_address) all_heaps_min = g_heaps[i]->background_min_overflow_address; } for (i = 0; i < n_heaps; i++) { g_heaps[i]->background_max_overflow_address = all_heaps_max; g_heaps[i]->background_min_overflow_address = all_heaps_min; } #endif //!USE_REGIONS } dprintf(2, ("Starting all gc thread mark stack overflow processing")); bgc_t_join.restart(); } // Handle any mark stack overflow: scanning dependent handles relies on all previous object promotions // being visible. If there really was an overflow (process_mark_overflow returns true) then set the // global flag indicating that at least one object promotion may have occurred (the usual comment // about races applies). (Note it's OK to set this flag even if we're about to terminate the loop and // exit the method since we unconditionally set this variable on method entry anyway). if (background_process_mark_overflow (sc->concurrent)) s_fUnscannedPromotions = TRUE; // If we decided that no scan was required we can terminate the loop now. if (!s_fScanRequired) break; // Otherwise we must join with the other workers to ensure that all mark stack overflows have been // processed before we start scanning dependent handle tables (if overflows remain while we scan we // could miss noting the promotion of some primary objects). 
bgc_t_join.join(this, gc_join_rescan_dependent_handles); if (bgc_t_join.joined()) { dprintf(3, ("Starting all gc thread for dependent handle promotion")); bgc_t_join.restart(); } // If the portion of the dependent handle table managed by this worker has handles that could still be // promoted perform a rescan. If the rescan resulted in at least one promotion note this fact since it // could require a rescan of handles on this or other workers. if (GCScan::GcDhUnpromotedHandlesExist(sc)) if (GCScan::GcDhReScan(sc)) s_fUnscannedPromotions = TRUE; } } #else void gc_heap::background_scan_dependent_handles (ScanContext *sc) { // Whenever we call this method there may have been preceding object promotions. So set // fUnscannedPromotions unconditionally (during further iterations of the scanning loop this will be set // based on the how the scanning proceeded). bool fUnscannedPromotions = true; // Scan dependent handles repeatedly until there are no further promotions that can be made or we made a // scan without performing any new promotions. while (GCScan::GcDhUnpromotedHandlesExist(sc) && fUnscannedPromotions) { // On each iteration of the loop start with the assumption that no further objects have been promoted. fUnscannedPromotions = false; // Handle any mark stack overflow: scanning dependent handles relies on all previous object promotions // being visible. If there was an overflow (background_process_mark_overflow returned true) then // additional objects now appear to be promoted and we should set the flag. if (background_process_mark_overflow (sc->concurrent)) fUnscannedPromotions = true; // Perform the scan and set the flag if any promotions resulted. if (GCScan::GcDhReScan (sc)) fUnscannedPromotions = true; } // Perform a last processing of any overflowed mark stack. 
background_process_mark_overflow (sc->concurrent); } #endif //MULTIPLE_HEAPS void gc_heap::recover_bgc_settings() { if ((settings.condemned_generation < max_generation) && gc_heap::background_running_p()) { dprintf (2, ("restoring bgc settings")); settings = saved_bgc_settings; GCHeap::GcCondemnedGeneration = gc_heap::settings.condemned_generation; } } void gc_heap::allow_fgc() { assert (bgc_thread == GCToEEInterface::GetThread()); bool bToggleGC = false; if (g_fSuspensionPending > 0) { bToggleGC = GCToEEInterface::EnablePreemptiveGC(); if (bToggleGC) { GCToEEInterface::DisablePreemptiveGC(); } } } BOOL gc_heap::is_bgc_in_progress() { return (background_running_p() || (current_bgc_state == bgc_initialized)); } void gc_heap::clear_commit_flag() { for (int i = get_start_generation_index(); i < total_generation_count; i++) { generation* gen = generation_of (i); heap_segment* seg = heap_segment_in_range (generation_start_segment (gen)); while (seg) { if (seg->flags & heap_segment_flags_ma_committed) { seg->flags &= ~heap_segment_flags_ma_committed; } if (seg->flags & heap_segment_flags_ma_pcommitted) { seg->flags &= ~heap_segment_flags_ma_pcommitted; } seg = heap_segment_next (seg); } } } void gc_heap::clear_commit_flag_global() { #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { g_heaps[i]->clear_commit_flag(); } #else clear_commit_flag(); #endif //MULTIPLE_HEAPS } void gc_heap::verify_mark_array_cleared (uint8_t* begin, uint8_t* end, uint32_t* mark_array_addr) { #ifdef _DEBUG size_t markw = mark_word_of (begin); size_t markw_end = mark_word_of (end); while (markw < markw_end) { if (mark_array_addr[markw]) { uint8_t* addr = mark_word_address (markw); #ifdef USE_REGIONS heap_segment* region = region_of (addr); dprintf (1, ("The mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix, r: %Ix(%Ix)) were not cleared", markw, mark_array_addr[markw], addr, (size_t)region, heap_segment_mem (region))); #else dprintf (1, ("The mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared", markw, mark_array_addr[markw], addr)); #endif //USE_REGIONS FATAL_GC_ERROR(); } markw++; } #else // _DEBUG UNREFERENCED_PARAMETER(begin); UNREFERENCED_PARAMETER(end); UNREFERENCED_PARAMETER(mark_array_addr); #endif //_DEBUG } uint8_t* gc_heap::get_start_address (heap_segment* seg) { uint8_t* start = #ifdef USE_REGIONS heap_segment_mem (seg); #else (heap_segment_read_only_p(seg) ? 
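// Illustrative sketch (not part of the GC build): the fixed-point loop that the
// dependent handle scanning above implements. Keep rescanning while (a) some
// object may have been promoted since the last scan and (b) there is still a
// dependent handle whose secondary has not been promoted; each rescan can
// promote more secondaries, which in turn can make further primaries reachable.
// The three predicates below are simplified assumptions standing in for
// GcDhUnpromotedHandlesExist, GcDhReScan and the mark-overflow processing.
#if 0
static void scan_dependent_handles_to_fixed_point (
    bool (*unpromoted_handles_exist) (),
    bool (*rescan_promoted_anything) (),
    bool (*process_mark_overflow) ())
{
    bool unscanned_promotions = true;   // conservatively assume prior promotions
    while (unpromoted_handles_exist () && unscanned_promotions)
    {
        unscanned_promotions = false;
        // Mark stack overflow processing can promote objects too, so it also
        // forces another iteration.
        if (process_mark_overflow ())
            unscanned_promotions = true;
        if (rescan_promoted_anything ())
            unscanned_promotions = true;
    }
    // One last drain of any overflowed mark stack.
    process_mark_overflow ();
}
#endif // illustrative sketch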
heap_segment_mem (seg) : (uint8_t*)seg); #endif //USE_REGIONS return start; } BOOL gc_heap::commit_mark_array_new_seg (gc_heap* hp, heap_segment* seg, uint32_t* new_card_table, uint8_t* new_lowest_address) { uint8_t* start = get_start_address (seg); uint8_t* end = heap_segment_reserved (seg); uint8_t* lowest = hp->background_saved_lowest_address; uint8_t* highest = hp->background_saved_highest_address; uint8_t* commit_start = NULL; uint8_t* commit_end = NULL; size_t commit_flag = 0; if ((highest >= start) && (lowest <= end)) { if ((start >= lowest) && (end <= highest)) { dprintf (GC_TABLE_LOG, ("completely in bgc range: seg %Ix-%Ix, bgc: %Ix-%Ix", start, end, lowest, highest)); commit_flag = heap_segment_flags_ma_committed; } else { dprintf (GC_TABLE_LOG, ("partially in bgc range: seg %Ix-%Ix, bgc: %Ix-%Ix", start, end, lowest, highest)); commit_flag = heap_segment_flags_ma_pcommitted; } commit_start = max (lowest, start); commit_end = min (highest, end); if (!commit_mark_array_by_range (commit_start, commit_end, hp->mark_array)) { return FALSE; } if (new_card_table == 0) { new_card_table = g_gc_card_table; } if (hp->card_table != new_card_table) { if (new_lowest_address == 0) { new_lowest_address = g_gc_lowest_address; } uint32_t* ct = &new_card_table[card_word (gcard_of (new_lowest_address))]; uint32_t* ma = (uint32_t*)((uint8_t*)card_table_mark_array (ct) - size_mark_array_of (0, new_lowest_address)); dprintf (GC_TABLE_LOG, ("table realloc-ed: %Ix->%Ix, MA: %Ix->%Ix", hp->card_table, new_card_table, hp->mark_array, ma)); if (!commit_mark_array_by_range (commit_start, commit_end, ma)) { return FALSE; } } seg->flags |= commit_flag; } return TRUE; } BOOL gc_heap::commit_mark_array_by_range (uint8_t* begin, uint8_t* end, uint32_t* mark_array_addr) { size_t beg_word = mark_word_of (begin); size_t end_word = mark_word_of (align_on_mark_word (end)); uint8_t* commit_start = align_lower_page ((uint8_t*)&mark_array_addr[beg_word]); uint8_t* commit_end = align_on_page ((uint8_t*)&mark_array_addr[end_word]); size_t size = (size_t)(commit_end - commit_start); #ifdef SIMPLE_DPRINTF dprintf (GC_TABLE_LOG, ("range: %Ix->%Ix mark word: %Ix->%Ix(%Id), mark array: %Ix->%Ix(%Id), commit %Ix->%Ix(%Id)", begin, end, beg_word, end_word, (end_word - beg_word) * sizeof (uint32_t), &mark_array_addr[beg_word], &mark_array_addr[end_word], (size_t)(&mark_array_addr[end_word] - &mark_array_addr[beg_word]), commit_start, commit_end, size)); #endif //SIMPLE_DPRINTF if (virtual_commit (commit_start, size, gc_oh_num::none)) { // We can only verify the mark array is cleared from begin to end, the first and the last // page aren't necessarily all cleared 'cause they could be used by other segments or // card bundle. 
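// (the commit range was page-aligned outward from the mark words of [begin, end) above,
// which is why those boundary pages can contain bits that don't belong to this range)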
verify_mark_array_cleared (begin, end, mark_array_addr); return TRUE; } else { dprintf (GC_TABLE_LOG, ("failed to commit %Id bytes", (end_word - beg_word) * sizeof (uint32_t))); return FALSE; } } BOOL gc_heap::commit_mark_array_with_check (heap_segment* seg, uint32_t* new_mark_array_addr) { uint8_t* start = get_start_address (seg); uint8_t* end = heap_segment_reserved (seg); #ifdef MULTIPLE_HEAPS uint8_t* lowest = heap_segment_heap (seg)->background_saved_lowest_address; uint8_t* highest = heap_segment_heap (seg)->background_saved_highest_address; #else uint8_t* lowest = background_saved_lowest_address; uint8_t* highest = background_saved_highest_address; #endif //MULTIPLE_HEAPS if ((highest >= start) && (lowest <= end)) { start = max (lowest, start); end = min (highest, end); if (!commit_mark_array_by_range (start, end, new_mark_array_addr)) { return FALSE; } } return TRUE; } BOOL gc_heap::commit_mark_array_by_seg (heap_segment* seg, uint32_t* mark_array_addr) { dprintf (GC_TABLE_LOG, ("seg: %Ix->%Ix; MA: %Ix", seg, heap_segment_reserved (seg), mark_array_addr)); uint8_t* start = get_start_address (seg); return commit_mark_array_by_range (start, heap_segment_reserved (seg), mark_array_addr); } BOOL gc_heap::commit_mark_array_bgc_init() { dprintf (GC_TABLE_LOG, ("BGC init commit: lowest: %Ix, highest: %Ix, mark_array: %Ix", lowest_address, highest_address, mark_array)); for (int i = get_start_generation_index(); i < total_generation_count; i++) { generation* gen = generation_of (i); heap_segment* seg = heap_segment_in_range (generation_start_segment (gen)); while (seg) { dprintf (GC_TABLE_LOG, ("h%d gen%d seg: %Ix(%Ix-%Ix), flags: %Id", heap_number, i, seg, heap_segment_mem (seg), heap_segment_allocated (seg), seg->flags)); if (!(seg->flags & heap_segment_flags_ma_committed)) { // For ro segments they could always be only partially in range so we'd // be calling this at the beginning of every BGC. We are not making this // more efficient right now - ro segments are currently only used by redhawk. if (heap_segment_read_only_p (seg)) { if ((heap_segment_mem (seg) >= lowest_address) && (heap_segment_reserved (seg) <= highest_address)) { if (commit_mark_array_by_seg (seg, mark_array)) { seg->flags |= heap_segment_flags_ma_committed; } else { return FALSE; } } else { uint8_t* start = max (lowest_address, heap_segment_mem (seg)); uint8_t* end = min (highest_address, heap_segment_reserved (seg)); if (commit_mark_array_by_range (start, end, mark_array)) { seg->flags |= heap_segment_flags_ma_pcommitted; } else { return FALSE; } } } else { // For normal segments they are by design completely in range so just // commit the whole mark array for each seg. if (commit_mark_array_by_seg (seg, mark_array)) { if (seg->flags & heap_segment_flags_ma_pcommitted) { seg->flags &= ~heap_segment_flags_ma_pcommitted; } seg->flags |= heap_segment_flags_ma_committed; } else { return FALSE; } } } seg = heap_segment_next (seg); } } return TRUE; } // This function doesn't check the commit flag since it's for a new array - // the mark_array flag for these segments will remain the same. 
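// It simply walks every segment of every generation (plus new_heap_segment for server GC)
// and commits the part of the new mark array that covers the BGC range of each segment.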
BOOL gc_heap::commit_new_mark_array (uint32_t* new_mark_array_addr) { dprintf (GC_TABLE_LOG, ("committing existing segs on MA %Ix", new_mark_array_addr)); for (int i = get_start_generation_index(); i < total_generation_count; i++) { generation* gen = generation_of (i); heap_segment* seg = heap_segment_in_range (generation_start_segment (gen)); while (seg) { if (!commit_mark_array_with_check (seg, new_mark_array_addr)) { return FALSE; } seg = heap_segment_next (seg); } } #ifdef MULTIPLE_HEAPS if (new_heap_segment) { if (!commit_mark_array_with_check (new_heap_segment, new_mark_array_addr)) { return FALSE; } } #endif //MULTIPLE_HEAPS return TRUE; } BOOL gc_heap::commit_new_mark_array_global (uint32_t* new_mark_array) { #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { if (!g_heaps[i]->commit_new_mark_array (new_mark_array)) { return FALSE; } } #else if (!commit_new_mark_array (new_mark_array)) { return FALSE; } #endif //MULTIPLE_HEAPS return TRUE; } void gc_heap::decommit_mark_array_by_seg (heap_segment* seg) { // if BGC is disabled (the finalize watchdog does this at shutdown), the mark array could have // been set to NULL. if (mark_array == NULL) { return; } dprintf (GC_TABLE_LOG, ("decommitting seg %Ix(%Ix), MA: %Ix", seg, seg->flags, mark_array)); size_t flags = seg->flags; if ((flags & heap_segment_flags_ma_committed) || (flags & heap_segment_flags_ma_pcommitted)) { uint8_t* start = get_start_address (seg); uint8_t* end = heap_segment_reserved (seg); if (flags & heap_segment_flags_ma_pcommitted) { start = max (lowest_address, start); end = min (highest_address, end); } size_t beg_word = mark_word_of (start); size_t end_word = mark_word_of (align_on_mark_word (end)); uint8_t* decommit_start = align_on_page ((uint8_t*)&mark_array[beg_word]); uint8_t* decommit_end = align_lower_page ((uint8_t*)&mark_array[end_word]); size_t size = (size_t)(decommit_end - decommit_start); #ifdef SIMPLE_DPRINTF dprintf (GC_TABLE_LOG, ("seg: %Ix mark word: %Ix->%Ix(%Id), mark array: %Ix->%Ix(%Id), decommit %Ix->%Ix(%Id)", seg, beg_word, end_word, (end_word - beg_word) * sizeof (uint32_t), &mark_array[beg_word], &mark_array[end_word], (size_t)(&mark_array[end_word] - &mark_array[beg_word]), decommit_start, decommit_end, size)); #endif //SIMPLE_DPRINTF if (decommit_start < decommit_end) { if (!virtual_decommit (decommit_start, size, gc_oh_num::none)) { dprintf (GC_TABLE_LOG, ("decommit on %Ix for %Id bytes failed", decommit_start, size)); assert (!"decommit failed"); } } dprintf (GC_TABLE_LOG, ("decommited [%Ix for address [%Ix", beg_word, seg)); } } bool gc_heap::should_update_end_mark_size() { return ((settings.condemned_generation == (max_generation - 1)) && (current_c_gc_state == c_gc_state_planning)); } void gc_heap::background_mark_phase () { verify_mark_array_cleared(); ScanContext sc; sc.thread_number = heap_number; sc.promotion = TRUE; sc.concurrent = FALSE; THREAD_FROM_HEAP; BOOL cooperative_mode = TRUE; #ifndef MULTIPLE_HEAPS const int thread = heap_number; #endif //!MULTIPLE_HEAPS dprintf(2,("-(GC%d)BMark-", VolatileLoad(&settings.gc_index))); assert (settings.concurrent); if (gen0_must_clear_bricks > 0) gen0_must_clear_bricks--; background_soh_alloc_count = 0; background_uoh_alloc_count = 0; bgc_overflow_count = 0; bpromoted_bytes (heap_number) = 0; static uint32_t num_sizedrefs = 0; #ifdef USE_REGIONS background_overflow_p = FALSE; #else background_min_overflow_address = MAX_PTR; background_max_overflow_address = 0; background_min_soh_overflow_address = MAX_PTR; 
background_max_soh_overflow_address = 0; #endif //USE_REGIONS processed_eph_overflow_p = FALSE; //set up the mark lists from g_mark_list assert (g_mark_list); mark_list = g_mark_list; //dont use the mark list for full gc //because multiple segments are more complex to handle and the list //is likely to overflow mark_list_end = &mark_list [0]; mark_list_index = &mark_list [0]; c_mark_list_index = 0; #ifndef MULTIPLE_HEAPS shigh = (uint8_t*) 0; slow = MAX_PTR; #endif //MULTIPLE_HEAPS generation* gen = generation_of (max_generation); dprintf(3,("BGC: stack marking")); sc.concurrent = TRUE; GCScan::GcScanRoots(background_promote_callback, max_generation, max_generation, &sc); dprintf(3,("BGC: finalization marking")); finalize_queue->GcScanRoots(background_promote_callback, heap_number, 0); size_t total_soh_size = generation_sizes (generation_of (max_generation)); size_t total_loh_size = generation_size (loh_generation); size_t total_poh_size = generation_size (poh_generation); bgc_begin_loh_size = total_loh_size; bgc_begin_poh_size = total_poh_size; bgc_loh_size_increased = 0; bgc_poh_size_increased = 0; background_soh_size_end_mark = 0; dprintf (GTC_LOG, ("BM: h%d: loh: %Id, soh: %Id, poh: %Id", heap_number, total_loh_size, total_soh_size, total_poh_size)); //concurrent_print_time_delta ("copying stack roots"); concurrent_print_time_delta ("CS"); FIRE_EVENT(BGC1stNonConEnd); #ifndef USE_REGIONS saved_overflow_ephemeral_seg = 0; #endif //!USE_REGIONS current_bgc_state = bgc_reset_ww; // we don't need a join here - just whichever thread that gets here // first can change the states and call restart_vm. // this is not true - we can't let the EE run when we are scanning stack. // since we now allow reset ww to run concurrently and have a join for it, // we can do restart ee on the 1st thread that got here. Make sure we handle the // sizedref handles correctly. #ifdef MULTIPLE_HEAPS bgc_t_join.join(this, gc_join_restart_ee); if (bgc_t_join.joined()) #endif //MULTIPLE_HEAPS { #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP // Resetting write watch for software write watch is pretty fast, much faster than for hardware write watch. Reset // can be done while the runtime is suspended or after the runtime is restarted, the preference was to reset while // the runtime is suspended. The reset for hardware write watch is done after the runtime is restarted below. concurrent_print_time_delta ("CRWW begin"); #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { g_heaps[i]->reset_write_watch (FALSE); } #else reset_write_watch (FALSE); #endif //MULTIPLE_HEAPS concurrent_print_time_delta ("CRWW"); #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP num_sizedrefs = GCToEEInterface::GetTotalNumSizedRefHandles(); // this c_write is not really necessary because restart_vm // has an instruction that will flush the cpu cache (interlocked // or whatever) but we don't want to rely on that. dprintf (GTC_LOG, ("setting cm_in_progress")); c_write (cm_in_progress, TRUE); assert (dont_restart_ee_p); dont_restart_ee_p = FALSE; restart_vm(); GCToOSInterface::YieldThread (0); #ifdef MULTIPLE_HEAPS dprintf(3, ("Starting all gc threads for gc")); bgc_t_join.restart(); #endif //MULTIPLE_HEAPS } #ifdef MULTIPLE_HEAPS bgc_t_join.join(this, gc_join_after_reset); if (bgc_t_join.joined()) #endif //MULTIPLE_HEAPS { disable_preemptive (true); #ifndef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP // When software write watch is enabled, resetting write watch is done while the runtime is // suspended above. 
The post-reset call to revisit_written_pages is only necessary for concurrent // reset_write_watch, to discard dirtied pages during the concurrent reset. #ifdef WRITE_WATCH concurrent_print_time_delta ("CRWW begin"); #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { g_heaps[i]->reset_write_watch (TRUE); } #else reset_write_watch (TRUE); #endif //MULTIPLE_HEAPS concurrent_print_time_delta ("CRWW"); #endif //WRITE_WATCH #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { g_heaps[i]->revisit_written_pages (TRUE, TRUE); } #else revisit_written_pages (TRUE, TRUE); #endif //MULTIPLE_HEAPS concurrent_print_time_delta ("CRW"); #endif // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { g_heaps[i]->current_bgc_state = bgc_mark_handles; } #else current_bgc_state = bgc_mark_handles; #endif //MULTIPLE_HEAPS current_c_gc_state = c_gc_state_marking; enable_preemptive (); #ifdef MULTIPLE_HEAPS dprintf(3, ("Joining BGC threads after resetting writewatch")); bgc_t_join.restart(); #endif //MULTIPLE_HEAPS } disable_preemptive (true); if (num_sizedrefs > 0) { GCScan::GcScanSizedRefs(background_promote, max_generation, max_generation, &sc); enable_preemptive (); #ifdef MULTIPLE_HEAPS bgc_t_join.join(this, gc_join_scan_sizedref_done); if (bgc_t_join.joined()) { dprintf(3, ("Done with marking all sized refs. Starting all bgc thread for marking other strong roots")); bgc_t_join.restart(); } #endif //MULTIPLE_HEAPS disable_preemptive (true); } dprintf (3,("BGC: handle table marking")); GCScan::GcScanHandles(background_promote, max_generation, max_generation, &sc); //concurrent_print_time_delta ("concurrent marking handle table"); concurrent_print_time_delta ("CRH"); current_bgc_state = bgc_mark_stack; dprintf (2,("concurrent draining mark list")); background_drain_mark_list (thread); //concurrent_print_time_delta ("concurrent marking stack roots"); concurrent_print_time_delta ("CRS"); dprintf (2,("concurrent revisiting dirtied pages")); // tuning has shown that there are advantages in doing this 2 times revisit_written_pages (TRUE); revisit_written_pages (TRUE); //concurrent_print_time_delta ("concurrent marking dirtied pages on LOH"); concurrent_print_time_delta ("CRre"); enable_preemptive (); #if defined(MULTIPLE_HEAPS) && !defined(USE_REGIONS) bgc_t_join.join(this, gc_join_concurrent_overflow); if (bgc_t_join.joined()) { uint8_t* all_heaps_max = 0; uint8_t* all_heaps_min = MAX_PTR; int i; for (i = 0; i < n_heaps; i++) { dprintf (3, ("heap %d overflow max is %Ix, min is %Ix", i, g_heaps[i]->background_max_overflow_address, g_heaps[i]->background_min_overflow_address)); if (all_heaps_max < g_heaps[i]->background_max_overflow_address) all_heaps_max = g_heaps[i]->background_max_overflow_address; if (all_heaps_min > g_heaps[i]->background_min_overflow_address) all_heaps_min = g_heaps[i]->background_min_overflow_address; } for (i = 0; i < n_heaps; i++) { g_heaps[i]->background_max_overflow_address = all_heaps_max; g_heaps[i]->background_min_overflow_address = all_heaps_min; } dprintf(3, ("Starting all bgc threads after updating the overflow info")); bgc_t_join.restart(); } #endif //MULTIPLE_HEAPS && !USE_REGIONS disable_preemptive (true); dprintf (2, ("before CRov count: %d", bgc_overflow_count)); bgc_overflow_count = 0; background_process_mark_overflow (TRUE); dprintf (2, ("after CRov count: %d", bgc_overflow_count)); bgc_overflow_count = 0; //concurrent_print_time_delta ("concurrent processing mark overflow"); concurrent_print_time_delta ("CRov"); 
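// Concurrent marking is done at this point - the rest of the mark phase runs with the EE
// suspended: a final pass over roots, handles and dirtied pages, then finalization and
// dependent handle scanning.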
// Stop all threads, crawl all stacks and revisit changed pages. FIRE_EVENT(BGC1stConEnd); dprintf (2, ("Stopping the EE")); enable_preemptive (); #ifdef MULTIPLE_HEAPS bgc_t_join.join(this, gc_join_suspend_ee); if (bgc_t_join.joined()) { bgc_threads_sync_event.Reset(); dprintf(3, ("Joining BGC threads for non concurrent final marking")); bgc_t_join.restart(); } #endif //MULTIPLE_HEAPS if (heap_number == 0) { enter_spin_lock (&gc_lock); suspended_start_time = GetHighPrecisionTimeStamp(); bgc_suspend_EE (); //suspend_EE (); bgc_threads_sync_event.Set(); } else { bgc_threads_sync_event.Wait(INFINITE, FALSE); dprintf (2, ("bgc_threads_sync_event is signalled")); } assert (settings.concurrent); assert (settings.condemned_generation == max_generation); dprintf (2, ("clearing cm_in_progress")); c_write (cm_in_progress, FALSE); bgc_alloc_lock->check(); current_bgc_state = bgc_final_marking; //concurrent_print_time_delta ("concurrent marking ended"); concurrent_print_time_delta ("CR"); FIRE_EVENT(BGC2ndNonConBegin); mark_absorb_new_alloc(); #ifdef FEATURE_EVENT_TRACE static uint64_t current_mark_time = 0; static uint64_t last_mark_time = 0; #endif //FEATURE_EVENT_TRACE // We need a join here 'cause find_object would complain if the gen0 // bricks of another heap haven't been fixed up. So we need to make sure // that every heap's gen0 bricks are fixed up before we proceed. #ifdef MULTIPLE_HEAPS bgc_t_join.join(this, gc_join_after_absorb); if (bgc_t_join.joined()) #endif //MULTIPLE_HEAPS { #ifdef BGC_SERVO_TUNING bgc_tuning::record_bgc_sweep_start(); #endif //BGC_SERVO_TUNING GCToEEInterface::BeforeGcScanRoots(max_generation, /* is_bgc */ true, /* is_concurrent */ false); #ifdef FEATURE_EVENT_TRACE informational_event_enabled_p = EVENT_ENABLED (GCMarkWithType); if (informational_event_enabled_p) last_mark_time = GetHighPrecisionTimeStamp(); #endif //FEATURE_EVENT_TRACE #ifdef MULTIPLE_HEAPS dprintf(3, ("Joining BGC threads after absorb")); bgc_t_join.restart(); #endif //MULTIPLE_HEAPS } //reset the flag, indicating that the EE no longer expect concurrent //marking sc.concurrent = FALSE; total_soh_size = generation_sizes (generation_of (max_generation)); total_loh_size = generation_size (loh_generation); total_poh_size = generation_size (poh_generation); dprintf (GTC_LOG, ("FM: h%d: loh: %Id, soh: %Id, poh: %Id", heap_number, total_loh_size, total_soh_size, total_poh_size)); dprintf (2, ("nonconcurrent marking stack roots")); GCScan::GcScanRoots(background_promote, max_generation, max_generation, &sc); //concurrent_print_time_delta ("nonconcurrent marking stack roots"); concurrent_print_time_delta ("NRS"); finalize_queue->GcScanRoots(background_promote, heap_number, 0); dprintf (2, ("nonconcurrent marking handle table")); GCScan::GcScanHandles(background_promote, max_generation, max_generation, &sc); //concurrent_print_time_delta ("nonconcurrent marking handle table"); concurrent_print_time_delta ("NRH"); dprintf (2,("---- (GC%d)final going through written pages ----", VolatileLoad(&settings.gc_index))); revisit_written_pages (FALSE); //concurrent_print_time_delta ("nonconcurrent revisit dirtied pages on LOH"); concurrent_print_time_delta ("NRre LOH"); dprintf (2, ("before NR 1st Hov count: %d", bgc_overflow_count)); bgc_overflow_count = 0; // Dependent handles need to be scanned with a special algorithm (see the header comment on // scan_dependent_handles for more detail). We perform an initial scan without processing any mark // stack overflow. 
This is not guaranteed to complete the operation but in a common case (where there // are no dependent handles that are due to be collected) it allows us to optimize away further scans. // The call to background_scan_dependent_handles is what will cycle through more iterations if // required and will also perform processing of any mark stack overflow once the dependent handle // table has been fully promoted. dprintf (2, ("1st dependent handle scan and process mark overflow")); GCScan::GcDhInitialScan(background_promote, max_generation, max_generation, &sc); background_scan_dependent_handles (&sc); //concurrent_print_time_delta ("1st nonconcurrent dependent handle scan and process mark overflow"); concurrent_print_time_delta ("NR 1st Hov"); dprintf (2, ("after NR 1st Hov count: %d", bgc_overflow_count)); bgc_overflow_count = 0; #ifdef MULTIPLE_HEAPS bgc_t_join.join(this, gc_join_null_dead_short_weak); if (bgc_t_join.joined()) #endif //MULTIPLE_HEAPS { #ifdef FEATURE_EVENT_TRACE bgc_time_info[time_mark_sizedref] = 0; record_mark_time (bgc_time_info[time_mark_roots], current_mark_time, last_mark_time); #endif //FEATURE_EVENT_TRACE #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP // The runtime is suspended, take this opportunity to pause tracking written pages to // avoid further perf penalty after the runtime is restarted SoftwareWriteWatch::DisableForGCHeap(); #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP GCToEEInterface::AfterGcScanRoots (max_generation, max_generation, &sc); #ifdef MULTIPLE_HEAPS dprintf(3, ("Joining BGC threads for short weak handle scan")); bgc_t_join.restart(); #endif //MULTIPLE_HEAPS } // null out the target of short weakref that were not promoted. GCScan::GcShortWeakPtrScan(max_generation, max_generation, &sc); //concurrent_print_time_delta ("bgc GcShortWeakPtrScan"); concurrent_print_time_delta ("NR GcShortWeakPtrScan"); { #ifdef MULTIPLE_HEAPS bgc_t_join.join(this, gc_join_scan_finalization); if (bgc_t_join.joined()) { #endif //MULTIPLE_HEAPS #ifdef FEATURE_EVENT_TRACE record_mark_time (bgc_time_info[time_mark_short_weak], current_mark_time, last_mark_time); #endif //FEATURE_EVENT_TRACE #ifdef MULTIPLE_HEAPS dprintf(3, ("Joining BGC threads for finalization")); bgc_t_join.restart(); } #endif //MULTIPLE_HEAPS dprintf(3,("Marking finalization data")); //concurrent_print_time_delta ("bgc joined to mark finalization"); concurrent_print_time_delta ("NRj"); finalize_queue->ScanForFinalization (background_promote, max_generation, FALSE, __this); concurrent_print_time_delta ("NRF"); } dprintf (2, ("before NR 2nd Hov count: %d", bgc_overflow_count)); bgc_overflow_count = 0; // Scan dependent handles again to promote any secondaries associated with primaries that were promoted // for finalization. As before background_scan_dependent_handles will also process any mark stack // overflow. 
dprintf (2, ("2nd dependent handle scan and process mark overflow")); background_scan_dependent_handles (&sc); //concurrent_print_time_delta ("2nd nonconcurrent dependent handle scan and process mark overflow"); concurrent_print_time_delta ("NR 2nd Hov"); #ifdef MULTIPLE_HEAPS bgc_t_join.join(this, gc_join_null_dead_long_weak); if (bgc_t_join.joined()) #endif //MULTIPLE_HEAPS { #ifdef FEATURE_EVENT_TRACE record_mark_time (bgc_time_info[time_mark_scan_finalization], current_mark_time, last_mark_time); #endif //FEATURE_EVENT_TRACE #ifdef MULTIPLE_HEAPS dprintf(2, ("Joining BGC threads for weak pointer deletion")); bgc_t_join.restart(); #endif //MULTIPLE_HEAPS } // null out the target of long weakref that were not promoted. GCScan::GcWeakPtrScan (max_generation, max_generation, &sc); concurrent_print_time_delta ("NR GcWeakPtrScan"); #ifdef MULTIPLE_HEAPS bgc_t_join.join(this, gc_join_null_dead_syncblk); if (bgc_t_join.joined()) #endif //MULTIPLE_HEAPS { dprintf (2, ("calling GcWeakPtrScanBySingleThread")); // scan for deleted entries in the syncblk cache GCScan::GcWeakPtrScanBySingleThread (max_generation, max_generation, &sc); #ifdef FEATURE_EVENT_TRACE record_mark_time (bgc_time_info[time_mark_long_weak], current_mark_time, last_mark_time); #endif //FEATURE_EVENT_TRACE concurrent_print_time_delta ("NR GcWeakPtrScanBySingleThread"); #ifdef MULTIPLE_HEAPS dprintf(2, ("Starting BGC threads for end of background mark phase")); bgc_t_join.restart(); #endif //MULTIPLE_HEAPS } dprintf (2, ("end of bgc mark: loh: %d, poh: %d, soh: %d", generation_size (loh_generation), generation_size (poh_generation), generation_sizes (generation_of (max_generation)))); for (int gen_idx = max_generation; gen_idx < total_generation_count; gen_idx++) { generation* gen = generation_of (gen_idx); dynamic_data* dd = dynamic_data_of (gen_idx); dd_begin_data_size (dd) = generation_size (gen_idx) - (generation_free_list_space (gen) + generation_free_obj_space (gen)) - get_generation_start_size (gen_idx); dd_survived_size (dd) = 0; dd_pinned_survived_size (dd) = 0; dd_artificial_pinned_survived_size (dd) = 0; dd_added_pinned_size (dd) = 0; } for (int i = get_start_generation_index(); i < uoh_start_generation; i++) { heap_segment* seg = heap_segment_rw (generation_start_segment (generation_of (i))); PREFIX_ASSUME(seg != NULL); while (seg) { seg->flags &= ~heap_segment_flags_swept; #ifndef USE_REGIONS if (heap_segment_allocated (seg) == heap_segment_mem (seg)) { FATAL_GC_ERROR(); } if (seg == ephemeral_heap_segment) { heap_segment_background_allocated (seg) = generation_allocation_start (generation_of (max_generation - 1)); } else #endif //!USE_REGIONS { heap_segment_background_allocated (seg) = heap_segment_allocated (seg); } background_soh_size_end_mark += heap_segment_background_allocated (seg) - heap_segment_mem (seg); dprintf (3333, ("h%d gen%d seg %Ix (%Ix) background allocated is %Ix", heap_number, i, (size_t)(seg), heap_segment_mem (seg), heap_segment_background_allocated (seg))); seg = heap_segment_next_rw (seg); } } // We need to void alloc contexts here 'cause while background_ephemeral_sweep is running // we can't let the user code consume the left over parts in these alloc contexts. 
repair_allocation_contexts (FALSE); dprintf (2, ("end of bgc mark: gen2 free list space: %d, free obj space: %d", generation_free_list_space (generation_of (max_generation)), generation_free_obj_space (generation_of (max_generation)))); dprintf(2,("---- (GC%d)End of background mark phase ----", VolatileLoad(&settings.gc_index))); } void gc_heap::suspend_EE () { dprintf (2, ("suspend_EE")); #ifdef MULTIPLE_HEAPS gc_heap* hp = gc_heap::g_heaps[0]; GCToEEInterface::SuspendEE(SUSPEND_FOR_GC_PREP); #else GCToEEInterface::SuspendEE(SUSPEND_FOR_GC_PREP); #endif //MULTIPLE_HEAPS } #ifdef MULTIPLE_HEAPS void gc_heap::bgc_suspend_EE () { for (int i = 0; i < n_heaps; i++) { gc_heap::g_heaps[i]->reset_gc_done(); } gc_started = TRUE; dprintf (2, ("bgc_suspend_EE")); GCToEEInterface::SuspendEE(SUSPEND_FOR_GC_PREP); gc_started = FALSE; for (int i = 0; i < n_heaps; i++) { gc_heap::g_heaps[i]->set_gc_done(); } } #else void gc_heap::bgc_suspend_EE () { reset_gc_done(); gc_started = TRUE; dprintf (2, ("bgc_suspend_EE")); GCToEEInterface::SuspendEE(SUSPEND_FOR_GC_PREP); gc_started = FALSE; set_gc_done(); } #endif //MULTIPLE_HEAPS void gc_heap::restart_EE () { dprintf (2, ("restart_EE")); #ifdef MULTIPLE_HEAPS GCToEEInterface::RestartEE(FALSE); #else GCToEEInterface::RestartEE(FALSE); #endif //MULTIPLE_HEAPS } inline uint8_t* gc_heap::high_page (heap_segment* seg, BOOL concurrent_p) { #ifdef USE_REGIONS assert (!concurrent_p || (heap_segment_gen_num (seg) >= max_generation)); #else if (concurrent_p) { uint8_t* end = ((seg == ephemeral_heap_segment) ? generation_allocation_start (generation_of (max_generation - 1)) : heap_segment_allocated (seg)); return align_lower_page (end); } else #endif //USE_REGIONS { return heap_segment_allocated (seg); } } void gc_heap::revisit_written_page (uint8_t* page, uint8_t* end, BOOL concurrent_p, uint8_t*& last_page, uint8_t*& last_object, BOOL large_objects_p, size_t& num_marked_objects) { uint8_t* start_address = page; uint8_t* o = 0; int align_const = get_alignment_constant (!large_objects_p); uint8_t* high_address = end; uint8_t* current_lowest_address = background_saved_lowest_address; uint8_t* current_highest_address = background_saved_highest_address; BOOL no_more_loop_p = FALSE; THREAD_FROM_HEAP; #ifndef MULTIPLE_HEAPS const int thread = heap_number; #endif //!MULTIPLE_HEAPS if (large_objects_p) { o = last_object; } else { if (((last_page + WRITE_WATCH_UNIT_SIZE) == page) || (start_address <= last_object)) { o = last_object; } else { o = find_first_object (start_address, last_object); // We can visit the same object again, but on a different page. assert (o >= last_object); } } dprintf (3,("page %Ix start: %Ix, %Ix[ ", (size_t)page, (size_t)o, (size_t)(min (high_address, page + WRITE_WATCH_UNIT_SIZE)))); while (o < (min (high_address, page + WRITE_WATCH_UNIT_SIZE))) { size_t s; if (concurrent_p && large_objects_p) { bgc_alloc_lock->bgc_mark_set (o); if (((CObjectHeader*)o)->IsFree()) { s = unused_array_size (o); } else { s = size (o); } } else { s = size (o); } dprintf (3,("Considering object %Ix(%s)", (size_t)o, (background_object_marked (o, FALSE) ? "bm" : "nbm"))); assert (Align (s) >= Align (min_obj_size)); uint8_t* next_o = o + Align (s, align_const); if (next_o >= start_address) { #ifdef MULTIPLE_HEAPS if (concurrent_p) { // We set last_object here for SVR BGC here because SVR BGC has more than // one GC thread. 
When we have more than one GC thread we would run into this // situation if we skipped unmarked objects: // bgc thread 1 calls GWW, and detect object X not marked so it would skip it // for revisit. // bgc thread 2 marks X and all its current children. // user thread comes along and dirties more (and later) pages in X. // bgc thread 1 calls GWW again and gets those later pages but it will not mark anything // on them because it had already skipped X. We need to detect that this object is now // marked and mark the children on the dirtied pages. // In the future if we have less BGC threads than we have heaps we should add // the check to the number of BGC threads. last_object = o; } #endif //MULTIPLE_HEAPS if (contain_pointers (o) && (!((o >= current_lowest_address) && (o < current_highest_address)) || background_marked (o))) { dprintf (3, ("going through %Ix", (size_t)o)); go_through_object (method_table(o), o, s, poo, start_address, use_start, (o + s), if ((uint8_t*)poo >= min (high_address, page + WRITE_WATCH_UNIT_SIZE)) { no_more_loop_p = TRUE; goto end_limit; } uint8_t* oo = *poo; num_marked_objects++; background_mark_object (oo THREAD_NUMBER_ARG); ); } else if ( concurrent_p && ((CObjectHeader*)o)->IsFree() && (next_o > min (high_address, page + WRITE_WATCH_UNIT_SIZE))) { // We need to not skip the object here because of this corner scenario: // A large object was being allocated during BGC mark so we first made it // into a free object, then cleared its memory. In this loop we would detect // that it's a free object which normally we would skip. But by the next time // we call GetWriteWatch we could still be on this object and the object had // been made into a valid object and some of its memory was changed. We need // to be sure to process those written pages so we can't skip the object just // yet. // // Similarly, when using software write watch, don't advance last_object when // the current object is a free object that spans beyond the current page or // high_address. Software write watch acquires gc_lock before the concurrent // GetWriteWatch() call during revisit_written_pages(). A foreground GC may // happen at that point and allocate from this free region, so when // revisit_written_pages() continues, it cannot skip now-valid objects in this // region. no_more_loop_p = TRUE; goto end_limit; } } end_limit: if (concurrent_p && large_objects_p) { bgc_alloc_lock->bgc_mark_done (); } if (no_more_loop_p) { break; } o = next_o; } #ifdef MULTIPLE_HEAPS if (concurrent_p) { assert (last_object < (min (high_address, page + WRITE_WATCH_UNIT_SIZE))); } else #endif //MULTIPLE_HEAPS { last_object = o; } dprintf (3,("Last object: %Ix", (size_t)last_object)); last_page = align_write_watch_lower_page (o); if (concurrent_p) { allow_fgc(); } } // When reset_only_p is TRUE, we should only reset pages that are in range // because we need to consider the segments or part of segments that were // allocated out of range all live. void gc_heap::revisit_written_pages (BOOL concurrent_p, BOOL reset_only_p) { if (concurrent_p && !reset_only_p) { current_bgc_state = bgc_revisit_soh; } size_t total_dirtied_pages = 0; size_t total_marked_objects = 0; bool reset_watch_state = !!concurrent_p; bool is_runtime_suspended = !concurrent_p; BOOL small_object_segments = TRUE; int start_gen_idx = get_start_generation_index(); #ifdef USE_REGIONS if (concurrent_p && !reset_only_p) { // We don't go into ephemeral regions during concurrent revisit. 
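// (they are revisited during the final, non-concurrent pass instead)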
start_gen_idx = max_generation; } #endif //USE_REGIONS for (int i = start_gen_idx; i < total_generation_count; i++) { heap_segment* seg = heap_segment_rw (generation_start_segment (generation_of (i))); PREFIX_ASSUME(seg != NULL); while (seg) { uint8_t* base_address = (uint8_t*)heap_segment_mem (seg); //we need to truncate to the base of the page because //some newly allocated could exist beyond heap_segment_allocated //and if we reset the last page write watch status, // they wouldn't be guaranteed to be visited -> gc hole. uintptr_t bcount = array_size; uint8_t* last_page = 0; uint8_t* last_object = heap_segment_mem (seg); uint8_t* high_address = 0; BOOL skip_seg_p = FALSE; if (reset_only_p) { if ((heap_segment_mem (seg) >= background_saved_lowest_address) || (heap_segment_reserved (seg) <= background_saved_highest_address)) { dprintf (3, ("h%d: sseg: %Ix(-%Ix)", heap_number, heap_segment_mem (seg), heap_segment_reserved (seg))); skip_seg_p = TRUE; } } if (!skip_seg_p) { dprintf (3, ("looking at seg %Ix", (size_t)last_object)); if (reset_only_p) { base_address = max (base_address, background_saved_lowest_address); dprintf (3, ("h%d: reset only starting %Ix", heap_number, base_address)); } dprintf (3, ("h%d: starting: %Ix, seg %Ix-%Ix", heap_number, base_address, heap_segment_mem (seg), heap_segment_reserved (seg))); while (1) { if (reset_only_p) { high_address = ((seg == ephemeral_heap_segment) ? alloc_allocated : heap_segment_allocated (seg)); high_address = min (high_address, background_saved_highest_address); } else { high_address = high_page (seg, concurrent_p); } if ((base_address < high_address) && (bcount >= array_size)) { ptrdiff_t region_size = high_address - base_address; dprintf (3, ("h%d: gw: [%Ix(%Id)", heap_number, (size_t)base_address, (size_t)region_size)); #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP // When the runtime is not suspended, it's possible for the table to be resized concurrently with the scan // for dirty pages below. Prevent that by synchronizing with grow_brick_card_tables(). When the runtime is // suspended, it's ok to scan for dirty pages concurrently from multiple background GC threads for disjoint // memory regions. 
if (!is_runtime_suspended) { enter_spin_lock(&gc_lock); } #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP get_write_watch_for_gc_heap (reset_watch_state, base_address, region_size, (void**)background_written_addresses, &bcount, is_runtime_suspended); #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP if (!is_runtime_suspended) { leave_spin_lock(&gc_lock); } #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP if (bcount != 0) { total_dirtied_pages += bcount; dprintf (3, ("Found %d pages [%Ix, %Ix[", bcount, (size_t)base_address, (size_t)high_address)); } if (!reset_only_p) { for (unsigned i = 0; i < bcount; i++) { uint8_t* page = (uint8_t*)background_written_addresses[i]; dprintf (3, ("looking at page %d at %Ix(h: %Ix)", i, (size_t)page, (size_t)high_address)); if (page < high_address) { //search for marked objects in the page revisit_written_page (page, high_address, concurrent_p, last_page, last_object, !small_object_segments, total_marked_objects); } else { dprintf (3, ("page %d at %Ix is >= %Ix!", i, (size_t)page, (size_t)high_address)); assert (!"page shouldn't have exceeded limit"); } } } if (bcount >= array_size){ base_address = background_written_addresses [array_size-1] + WRITE_WATCH_UNIT_SIZE; bcount = array_size; } } else { break; } } } seg = heap_segment_next_rw (seg); } if (i == soh_gen2) { if (!reset_only_p) { dprintf (GTC_LOG, ("h%d: SOH: dp:%Id; mo: %Id", heap_number, total_dirtied_pages, total_marked_objects)); fire_revisit_event (total_dirtied_pages, total_marked_objects, FALSE); concurrent_print_time_delta (concurrent_p ? "CR SOH" : "NR SOH"); total_dirtied_pages = 0; total_marked_objects = 0; } if (concurrent_p && !reset_only_p) { current_bgc_state = bgc_revisit_uoh; } small_object_segments = FALSE; dprintf (3, ("now revisiting large object segments")); } else { if (reset_only_p) { dprintf (GTC_LOG, ("h%d: tdp: %Id", heap_number, total_dirtied_pages)); } else { dprintf (GTC_LOG, ("h%d: LOH: dp:%Id; mo: %Id", heap_number, total_dirtied_pages, total_marked_objects)); fire_revisit_event (total_dirtied_pages, total_marked_objects, TRUE); } } } } void gc_heap::background_grow_c_mark_list() { assert (c_mark_list_index >= c_mark_list_length); BOOL should_drain_p = FALSE; THREAD_FROM_HEAP; #ifndef MULTIPLE_HEAPS const int thread = heap_number; #endif //!MULTIPLE_HEAPS dprintf (2, ("stack copy buffer overflow")); uint8_t** new_c_mark_list = 0; { FAULT_NOT_FATAL(); if (c_mark_list_length >= (SIZE_T_MAX / (2 * sizeof (uint8_t*)))) { should_drain_p = TRUE; } else { new_c_mark_list = new (nothrow) uint8_t*[c_mark_list_length*2]; if (new_c_mark_list == 0) { should_drain_p = TRUE; } } } if (should_drain_p) { dprintf (2, ("No more memory for the stacks copy, draining..")); //drain the list by marking its elements background_drain_mark_list (thread); } else { assert (new_c_mark_list); memcpy (new_c_mark_list, c_mark_list, c_mark_list_length*sizeof(uint8_t*)); c_mark_list_length = c_mark_list_length*2; delete c_mark_list; c_mark_list = new_c_mark_list; } } void gc_heap::background_promote_callback (Object** ppObject, ScanContext* sc, uint32_t flags) { UNREFERENCED_PARAMETER(sc); //in order to save space on the array, mark the object, //knowing that it will be visited later assert (settings.concurrent); THREAD_NUMBER_FROM_CONTEXT; #ifndef MULTIPLE_HEAPS const int thread = 0; #endif //!MULTIPLE_HEAPS uint8_t* o = (uint8_t*)*ppObject; if (o == 0) return; HEAP_FROM_THREAD; gc_heap* hp = gc_heap::heap_of (o); if ((o < hp->background_saved_lowest_address) || (o >= 
hp->background_saved_highest_address)) { return; } if (flags & GC_CALL_INTERIOR) { o = hp->find_object (o); if (o == 0) return; } #ifdef FEATURE_CONSERVATIVE_GC // For conservative GC, a value on stack may point to middle of a free object. // In this case, we don't need to promote the pointer. if (GCConfig::GetConservativeGC() && ((CObjectHeader*)o)->IsFree()) { return; } #endif //FEATURE_CONSERVATIVE_GC #ifdef _DEBUG ((CObjectHeader*)o)->Validate(); #endif //_DEBUG dprintf (3, ("Concurrent Background Promote %Ix", (size_t)o)); if (o && (size (o) > loh_size_threshold)) { dprintf (3, ("Brc %Ix", (size_t)o)); } if (hpt->c_mark_list_index >= hpt->c_mark_list_length) { hpt->background_grow_c_mark_list(); } dprintf (3, ("pushing %08x into mark_list", (size_t)o)); hpt->c_mark_list [hpt->c_mark_list_index++] = o; STRESS_LOG3(LF_GC|LF_GCROOTS, LL_INFO1000000, " GCHeap::Background Promote: Promote GC Root *%p = %p MT = %pT", ppObject, o, o ? ((Object*) o)->GetGCSafeMethodTable() : NULL); } void gc_heap::mark_absorb_new_alloc() { fix_allocation_contexts (FALSE); gen0_bricks_cleared = FALSE; clear_gen0_bricks(); } BOOL gc_heap::prepare_bgc_thread(gc_heap* gh) { BOOL success = FALSE; BOOL thread_created = FALSE; dprintf (2, ("Preparing gc thread")); gh->bgc_threads_timeout_cs.Enter(); if (!(gh->bgc_thread_running)) { dprintf (2, ("GC thread not running")); if ((gh->bgc_thread == 0) && create_bgc_thread(gh)) { success = TRUE; thread_created = TRUE; } } else { dprintf (3, ("GC thread already running")); success = TRUE; } gh->bgc_threads_timeout_cs.Leave(); if(thread_created) FIRE_EVENT(GCCreateConcurrentThread_V1); return success; } BOOL gc_heap::create_bgc_thread(gc_heap* gh) { assert (background_gc_done_event.IsValid()); //dprintf (2, ("Creating BGC thread")); gh->bgc_thread_running = GCToEEInterface::CreateThread(gh->bgc_thread_stub, gh, true, ".NET BGC"); return gh->bgc_thread_running; } BOOL gc_heap::create_bgc_threads_support (int number_of_heaps) { BOOL ret = FALSE; dprintf (3, ("Creating concurrent GC thread for the first time")); if (!background_gc_done_event.CreateManualEventNoThrow(TRUE)) { goto cleanup; } if (!bgc_threads_sync_event.CreateManualEventNoThrow(FALSE)) { goto cleanup; } if (!ee_proceed_event.CreateAutoEventNoThrow(FALSE)) { goto cleanup; } if (!bgc_start_event.CreateManualEventNoThrow(FALSE)) { goto cleanup; } #ifdef MULTIPLE_HEAPS bgc_t_join.init (number_of_heaps, join_flavor_bgc); #else UNREFERENCED_PARAMETER(number_of_heaps); #endif //MULTIPLE_HEAPS ret = TRUE; cleanup: if (!ret) { if (background_gc_done_event.IsValid()) { background_gc_done_event.CloseEvent(); } if (bgc_threads_sync_event.IsValid()) { bgc_threads_sync_event.CloseEvent(); } if (ee_proceed_event.IsValid()) { ee_proceed_event.CloseEvent(); } if (bgc_start_event.IsValid()) { bgc_start_event.CloseEvent(); } } return ret; } BOOL gc_heap::create_bgc_thread_support() { uint8_t** parr; //needs to have room for enough smallest objects fitting on a page parr = new (nothrow) uint8_t*[1 + OS_PAGE_SIZE / MIN_OBJECT_SIZE]; if (!parr) { return FALSE; } make_c_mark_list (parr); return TRUE; } int gc_heap::check_for_ephemeral_alloc() { int gen = ((settings.reason == reason_oos_soh) ? 
(max_generation - 1) : -1); if (gen == -1) { #ifdef MULTIPLE_HEAPS for (int heap_index = 0; heap_index < n_heaps; heap_index++) #endif //MULTIPLE_HEAPS { for (int i = 0; i < max_generation; i++) { #ifdef MULTIPLE_HEAPS if (g_heaps[heap_index]->get_new_allocation (i) <= 0) #else if (get_new_allocation (i) <= 0) #endif //MULTIPLE_HEAPS { gen = max (gen, i); } else break; } } } return gen; } // Wait for gc to finish sequential part void gc_heap::wait_to_proceed() { assert (background_gc_done_event.IsValid()); assert (bgc_start_event.IsValid()); user_thread_wait(&ee_proceed_event, FALSE); } // Start a new concurrent gc void gc_heap::start_c_gc() { assert (background_gc_done_event.IsValid()); assert (bgc_start_event.IsValid()); //Need to make sure that the gc thread is in the right place. background_gc_done_event.Wait(INFINITE, FALSE); background_gc_done_event.Reset(); bgc_start_event.Set(); } void gc_heap::do_background_gc() { dprintf (2, ("starting a BGC")); #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { g_heaps[i]->init_background_gc(); } #else init_background_gc(); #endif //MULTIPLE_HEAPS #ifdef BGC_SERVO_TUNING bgc_tuning::record_bgc_start(); #endif //BGC_SERVO_TUNING //start the background gc start_c_gc (); //wait until we get restarted by the BGC. wait_to_proceed(); } void gc_heap::kill_gc_thread() { //assert (settings.concurrent == FALSE); // We are doing a two-stage shutdown now. // In the first stage, we do minimum work, and call ExitProcess at the end. // In the secodn stage, we have the Loader lock and only one thread is // alive. Hence we do not need to kill gc thread. background_gc_done_event.CloseEvent(); bgc_start_event.CloseEvent(); bgc_threads_timeout_cs.Destroy(); bgc_thread = 0; } void gc_heap::bgc_thread_function() { assert (background_gc_done_event.IsValid()); assert (bgc_start_event.IsValid()); dprintf (3, ("gc_thread thread starting...")); BOOL do_exit = FALSE; bool cooperative_mode = true; bgc_thread_id.SetToCurrentThread(); dprintf (1, ("bgc_thread_id is set to %x", (uint32_t)GCToOSInterface::GetCurrentThreadIdForLogging())); while (1) { // Wait for work to do... dprintf (3, ("bgc thread: waiting...")); cooperative_mode = enable_preemptive (); //current_thread->m_fPreemptiveGCDisabled = 0; uint32_t result = bgc_start_event.Wait( #ifdef _DEBUG #ifdef MULTIPLE_HEAPS INFINITE, #else 2000, #endif //MULTIPLE_HEAPS #else //_DEBUG #ifdef MULTIPLE_HEAPS INFINITE, #else 20000, #endif //MULTIPLE_HEAPS #endif //_DEBUG FALSE); dprintf (2, ("gc thread: finished waiting")); // not calling disable_preemptive here 'cause we // can't wait for GC complete here - RestartEE will be called // when we've done the init work. if (result == WAIT_TIMEOUT) { // Should join the bgc threads and terminate all of them // at once. 
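// On a timeout we tear this thread down if keep_bgc_threads_p has been cleared; otherwise
// we simply go back to waiting for the next BGC to start.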
dprintf (1, ("GC thread timeout")); bgc_threads_timeout_cs.Enter(); if (!keep_bgc_threads_p) { dprintf (2, ("GC thread exiting")); bgc_thread_running = FALSE; bgc_thread = 0; bgc_thread_id.Clear(); do_exit = TRUE; } bgc_threads_timeout_cs.Leave(); if (do_exit) break; else { dprintf (3, ("GC thread needed, not exiting")); continue; } } // if we signal the thread with no concurrent work to do -> exit if (!settings.concurrent) { dprintf (3, ("no concurrent GC needed, exiting")); break; } gc_background_running = TRUE; dprintf (2, (ThreadStressLog::gcStartBgcThread(), heap_number, generation_free_list_space (generation_of (max_generation)), generation_free_obj_space (generation_of (max_generation)), dd_fragmentation (dynamic_data_of (max_generation)))); gc1(); #ifndef DOUBLY_LINKED_FL current_bgc_state = bgc_not_in_process; #endif //!DOUBLY_LINKED_FL enable_preemptive (); #ifdef MULTIPLE_HEAPS bgc_t_join.join(this, gc_join_done); if (bgc_t_join.joined()) #endif //MULTIPLE_HEAPS { enter_spin_lock (&gc_lock); dprintf (SPINLOCK_LOG, ("bgc Egc")); bgc_start_event.Reset(); do_post_gc(); #ifdef MULTIPLE_HEAPS for (int gen = max_generation; gen < total_generation_count; gen++) { size_t desired_per_heap = 0; size_t total_desired = 0; gc_heap* hp = 0; dynamic_data* dd; for (int i = 0; i < n_heaps; i++) { hp = g_heaps[i]; dd = hp->dynamic_data_of (gen); size_t temp_total_desired = total_desired + dd_desired_allocation (dd); if (temp_total_desired < total_desired) { // we overflowed. total_desired = (size_t)MAX_PTR; break; } total_desired = temp_total_desired; } desired_per_heap = Align ((total_desired/n_heaps), get_alignment_constant (FALSE)); if (gen >= loh_generation) { desired_per_heap = exponential_smoothing (gen, dd_collection_count (dynamic_data_of (max_generation)), desired_per_heap); } for (int i = 0; i < n_heaps; i++) { hp = gc_heap::g_heaps[i]; dd = hp->dynamic_data_of (gen); dd_desired_allocation (dd) = desired_per_heap; dd_gc_new_allocation (dd) = desired_per_heap; dd_new_allocation (dd) = desired_per_heap; } } #endif //MULTIPLE_HEAPS #ifdef MULTIPLE_HEAPS fire_pevents(); #endif //MULTIPLE_HEAPS c_write (settings.concurrent, FALSE); gc_background_running = FALSE; keep_bgc_threads_p = FALSE; background_gc_done_event.Set(); dprintf (SPINLOCK_LOG, ("bgc Lgc")); leave_spin_lock (&gc_lock); #ifdef MULTIPLE_HEAPS dprintf(1, ("End of BGC - starting all BGC threads")); bgc_t_join.restart(); #endif //MULTIPLE_HEAPS } // We can't disable preempt here because there might've been a GC already // started and decided to do a BGC and waiting for a BGC thread to restart // vm. That GC will be waiting in wait_to_proceed and we are waiting for it // to restart the VM so we deadlock. //gc_heap::disable_preemptive (true); } FIRE_EVENT(GCTerminateConcurrentThread_V1); dprintf (3, ("bgc_thread thread exiting")); return; } #ifdef BGC_SERVO_TUNING bool gc_heap::bgc_tuning::stepping_trigger (uint32_t current_memory_load, size_t current_gen2_count) { if (!bgc_tuning::enable_fl_tuning) { return false; } bool stepping_trigger_p = false; if (use_stepping_trigger_p) { dprintf (BGC_TUNING_LOG, ("current ml: %d, goal: %d", current_memory_load, memory_load_goal)); // We don't go all the way up to mem goal because if we do we could end up with every // BGC being triggered by stepping all the way up to goal, and when we actually reach // goal we have no time to react 'cause the next BGC could already be over goal. 
if ((current_memory_load <= (memory_load_goal * 2 / 3)) || ((memory_load_goal > current_memory_load) && ((memory_load_goal - current_memory_load) > (stepping_interval * 3)))) { int memory_load_delta = (int)current_memory_load - (int)last_stepping_mem_load; if (memory_load_delta >= (int)stepping_interval) { stepping_trigger_p = (current_gen2_count == last_stepping_bgc_count); if (stepping_trigger_p) { current_gen2_count++; } dprintf (BGC_TUNING_LOG, ("current ml: %d - %d = %d (>= %d), gen2 count: %d->%d, stepping trigger: %s ", current_memory_load, last_stepping_mem_load, memory_load_delta, stepping_interval, last_stepping_bgc_count, current_gen2_count, (stepping_trigger_p ? "yes" : "no"))); last_stepping_mem_load = current_memory_load; last_stepping_bgc_count = current_gen2_count; } } else { use_stepping_trigger_p = false; } } return stepping_trigger_p; } // Note that I am doing this per heap but as we are in this calculation other // heaps could increase their fl alloc. We are okay with that inaccurancy. bool gc_heap::bgc_tuning::should_trigger_bgc_loh() { if (fl_tuning_triggered) { #ifdef MULTIPLE_HEAPS gc_heap* hp = g_heaps[0]; #else gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS if (!(gc_heap::background_running_p())) { size_t current_alloc = get_total_servo_alloc (loh_generation); tuning_calculation* current_gen_calc = &gen_calc[loh_generation - max_generation]; if (current_alloc < current_gen_calc->last_bgc_end_alloc) { dprintf (BGC_TUNING_LOG, ("BTL: current alloc: %Id, last alloc: %Id?", current_alloc, current_gen_calc->last_bgc_end_alloc)); } bool trigger_p = ((current_alloc - current_gen_calc->last_bgc_end_alloc) >= current_gen_calc->alloc_to_trigger); dprintf (2, ("BTL3: LOH a %Id, la: %Id(%Id), %Id", current_alloc, current_gen_calc->last_bgc_end_alloc, (current_alloc - current_gen_calc->last_bgc_end_alloc), current_gen_calc->alloc_to_trigger)); if (trigger_p) { dprintf (BGC_TUNING_LOG, ("BTL3: LOH detected (%Id - %Id) >= %Id, TRIGGER", current_alloc, current_gen_calc->last_bgc_end_alloc, current_gen_calc->alloc_to_trigger)); return true; } } } return false; } bool gc_heap::bgc_tuning::should_trigger_bgc() { if (!bgc_tuning::enable_fl_tuning || gc_heap::background_running_p()) { return false; } if (settings.reason == reason_bgc_tuning_loh) { // TODO: this should be an assert because if the reason was reason_bgc_tuning_loh, // we should have already set to condemn max_generation but I'm keeping it // for now in case we are reverting it for other reasons. bgc_tuning::next_bgc_p = true; dprintf (BGC_TUNING_LOG, ("BTL LOH triggered")); return true; } if (!bgc_tuning::next_bgc_p && !fl_tuning_triggered && (gc_heap::settings.entry_memory_load >= (memory_load_goal * 2 / 3)) && (gc_heap::full_gc_counts[gc_type_background] >= 2)) { next_bgc_p = true; gen_calc[0].first_alloc_to_trigger = gc_heap::get_total_servo_alloc (max_generation); gen_calc[1].first_alloc_to_trigger = gc_heap::get_total_servo_alloc (loh_generation); dprintf (BGC_TUNING_LOG, ("BTL[GTC] mem high enough: %d(goal: %d), %Id BGCs done, g2a=%Id, g3a=%Id, trigger FL tuning!", gc_heap::settings.entry_memory_load, memory_load_goal, gc_heap::full_gc_counts[gc_type_background], gen_calc[0].first_alloc_to_trigger, gen_calc[1].first_alloc_to_trigger)); } if (bgc_tuning::next_bgc_p) { dprintf (BGC_TUNING_LOG, ("BTL started FL tuning")); return true; } if (!fl_tuning_triggered) { return false; } // If the tuning started, we need to check if we've exceeded the alloc. 
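// ie, whether the gen2 fl alloc since the last BGC end has reached alloc_to_trigger.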
int index = 0; bgc_tuning::tuning_calculation* current_gen_calc = 0; index = 0; current_gen_calc = &bgc_tuning::gen_calc[index]; #ifdef MULTIPLE_HEAPS gc_heap* hp = g_heaps[0]; #else gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS size_t current_gen1_index = dd_collection_count (hp->dynamic_data_of (max_generation - 1)); size_t gen1_so_far = current_gen1_index - gen1_index_last_bgc_end; if (current_gen_calc->alloc_to_trigger > 0) { // We are specifically checking for gen2 here. LOH is covered by should_trigger_bgc_loh. size_t current_alloc = get_total_servo_alloc (max_generation); if ((current_alloc - current_gen_calc->last_bgc_end_alloc) >= current_gen_calc->alloc_to_trigger) { dprintf (BGC_TUNING_LOG, ("BTL2: SOH detected (%Id - %Id) >= %Id, TRIGGER", current_alloc, current_gen_calc->last_bgc_end_alloc, current_gen_calc->alloc_to_trigger)); settings.reason = reason_bgc_tuning_soh; return true; } } return false; } bool gc_heap::bgc_tuning::should_delay_alloc (int gen_number) { if ((gen_number != max_generation) || !bgc_tuning::enable_fl_tuning) return false; if (current_c_gc_state == c_gc_state_planning) { int i = 0; #ifdef MULTIPLE_HEAPS for (; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; size_t current_fl_size = generation_free_list_space (hp->generation_of (max_generation)); size_t last_bgc_fl_size = hp->bgc_maxgen_end_fl_size; #else { size_t current_fl_size = generation_free_list_space (generation_of (max_generation)); size_t last_bgc_fl_size = bgc_maxgen_end_fl_size; #endif //MULTIPLE_HEAPS if (last_bgc_fl_size) { float current_flr = (float) current_fl_size / (float)last_bgc_fl_size; if (current_flr < 0.4) { dprintf (BGC_TUNING_LOG, ("BTL%d h%d last fl %Id, curr fl %Id (%.3f) d1", gen_number, i, last_bgc_fl_size, current_fl_size, current_flr)); return true; } } } } return false; } void gc_heap::bgc_tuning::update_bgc_start (int gen_number, size_t num_gen1s_since_end) { int tuning_data_index = gen_number - max_generation; tuning_calculation* current_gen_calc = &gen_calc[tuning_data_index]; tuning_stats* current_gen_stats = &gen_stats[tuning_data_index]; size_t total_generation_size = get_total_generation_size (gen_number); ptrdiff_t current_bgc_fl_size = get_total_generation_fl_size (gen_number); double physical_gen_flr = (double)current_bgc_fl_size * 100.0 / (double)total_generation_size; ptrdiff_t artificial_additional_fl = 0; if (fl_tuning_triggered) { artificial_additional_fl = ((current_gen_calc->end_gen_size_goal > total_generation_size) ? (current_gen_calc->end_gen_size_goal - total_generation_size) : 0); total_generation_size += artificial_additional_fl; current_bgc_fl_size += artificial_additional_fl; } current_gen_calc->current_bgc_start_flr = (double)current_bgc_fl_size * 100.0 / (double)total_generation_size; size_t current_alloc = get_total_servo_alloc (gen_number); dprintf (BGC_TUNING_LOG, ("BTL%d: st a: %Id, la: %Id", gen_number, current_alloc, current_gen_stats->last_alloc)); current_gen_stats->last_alloc_end_to_start = current_alloc - current_gen_stats->last_alloc; current_gen_stats->last_alloc = current_alloc; current_gen_calc->actual_alloc_to_trigger = current_alloc - current_gen_calc->last_bgc_end_alloc; dprintf (BGC_TUNING_LOG, ("BTL%d: st: %Id g1s (%Id->%Id/gen1) since end, flr: %.3f(afl: %Id, %.3f)", gen_number, actual_num_gen1s_to_trigger, current_gen_stats->last_alloc_end_to_start, (num_gen1s_since_end ? 
(current_gen_stats->last_alloc_end_to_start / num_gen1s_since_end) : 0), current_gen_calc->current_bgc_start_flr, artificial_additional_fl, physical_gen_flr)); } void gc_heap::bgc_tuning::record_bgc_start() { if (!bgc_tuning::enable_fl_tuning) return; uint64_t elapsed_time_so_far = GetHighPrecisionTimeStamp() - process_start_time; // Note that younger gen's collection count is always updated with older gen's collections. // So to calcuate the actual # of gen1 occurred we really should take the # of gen2s into // account (and deduct from gen1's collection count). But right now I am using it for stats. size_t current_gen1_index = get_current_gc_index (max_generation - 1); dprintf (BGC_TUNING_LOG, ("BTL: g2t[st][g1 %Id]: %0.3f minutes", current_gen1_index, (double)elapsed_time_so_far / (double)1000000 / (double)60)); actual_num_gen1s_to_trigger = current_gen1_index - gen1_index_last_bgc_end; gen1_index_last_bgc_start = current_gen1_index; update_bgc_start (max_generation, actual_num_gen1s_to_trigger); update_bgc_start (loh_generation, actual_num_gen1s_to_trigger); } double convert_range (double lower, double upper, double num, double percentage) { double d = num - lower; if (d < 0.0) return 0.0; else { d = min ((upper - lower), d); return (d * percentage); } } double calculate_gradual_d (double delta_double, double step) { bool changed_sign = false; if (delta_double < 0.0) { delta_double = -delta_double; changed_sign = true; } double res = 0; double current_lower_limit = 0; double current_ratio = 1.0; // Given a step, we will gradually reduce the weight of the portion // in each step. // We reduce by *0.6 each time so there will be 3 iterations: // 1->0.6->0.36 (next one would be 0.216 and terminate the loop) // This will produce a result that's between 0 and 0.098. while (current_ratio > 0.22) { res += convert_range (current_lower_limit, (current_lower_limit + step), delta_double, current_ratio); current_lower_limit += step; current_ratio *= 0.6; } if (changed_sign) res = -res; return res; } void gc_heap::bgc_tuning::update_bgc_sweep_start (int gen_number, size_t num_gen1s_since_start) { int tuning_data_index = gen_number - max_generation; tuning_calculation* current_gen_calc = &gen_calc[tuning_data_index]; tuning_stats* current_gen_stats = &gen_stats[tuning_data_index]; size_t total_generation_size = 0; ptrdiff_t current_bgc_fl_size = 0; total_generation_size = get_total_generation_size (gen_number); current_bgc_fl_size = get_total_generation_fl_size (gen_number); double physical_gen_flr = (double)current_bgc_fl_size * 100.0 / (double)total_generation_size; ptrdiff_t artificial_additional_fl = 0; if (fl_tuning_triggered) { artificial_additional_fl = ((current_gen_calc->end_gen_size_goal > total_generation_size) ? (current_gen_calc->end_gen_size_goal - total_generation_size) : 0); total_generation_size += artificial_additional_fl; current_bgc_fl_size += artificial_additional_fl; } current_gen_calc->current_bgc_sweep_flr = (double)current_bgc_fl_size * 100.0 / (double)total_generation_size; size_t current_alloc = get_total_servo_alloc (gen_number); dprintf (BGC_TUNING_LOG, ("BTL%d: sw a: %Id, la: %Id", gen_number, current_alloc, current_gen_stats->last_alloc)); current_gen_stats->last_alloc_start_to_sweep = current_alloc - current_gen_stats->last_alloc; // We are resetting gen2 alloc at sweep start. 
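// (last_alloc is zeroed to match, so the sweep-to-end delta taken at BGC end is measured
// from here)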
current_gen_stats->last_alloc = 0; #ifdef SIMPLE_DPRINTF dprintf (BGC_TUNING_LOG, ("BTL%d: sflr: %.3f%%->%.3f%% (%Id->%Id, %Id->%Id) (%Id:%Id-%Id/gen1) since start (afl: %Id, %.3f)", gen_number, current_gen_calc->last_bgc_flr, current_gen_calc->current_bgc_sweep_flr, current_gen_calc->last_bgc_size, total_generation_size, current_gen_stats->last_bgc_fl_size, current_bgc_fl_size, num_gen1s_since_start, current_gen_stats->last_alloc_start_to_sweep, (num_gen1s_since_start? (current_gen_stats->last_alloc_start_to_sweep / num_gen1s_since_start) : 0), artificial_additional_fl, physical_gen_flr)); #endif //SIMPLE_DPRINTF } void gc_heap::bgc_tuning::record_bgc_sweep_start() { if (!bgc_tuning::enable_fl_tuning) return; size_t current_gen1_index = get_current_gc_index (max_generation - 1); size_t num_gen1s_since_start = current_gen1_index - gen1_index_last_bgc_start; gen1_index_last_bgc_sweep = current_gen1_index; uint64_t elapsed_time_so_far = GetHighPrecisionTimeStamp() - process_start_time; dprintf (BGC_TUNING_LOG, ("BTL: g2t[sw][g1 %Id]: %0.3f minutes", current_gen1_index, (double)elapsed_time_so_far / (double)1000000 / (double)60)); update_bgc_sweep_start (max_generation, num_gen1s_since_start); update_bgc_sweep_start (loh_generation, num_gen1s_since_start); } void gc_heap::bgc_tuning::calculate_tuning (int gen_number, bool use_this_loop_p) { BOOL use_kd_p = enable_kd; BOOL use_ki_p = enable_ki; BOOL use_smooth_p = enable_smooth; BOOL use_tbh_p = enable_tbh; BOOL use_ff_p = enable_ff; int tuning_data_index = gen_number - max_generation; tuning_calculation* current_gen_calc = &gen_calc[tuning_data_index]; tuning_stats* current_gen_stats = &gen_stats[tuning_data_index]; bgc_size_data* data = &current_bgc_end_data[tuning_data_index]; size_t total_generation_size = data->gen_size; size_t current_bgc_fl = data->gen_fl_size; size_t current_bgc_surv_size = get_total_surv_size (gen_number); size_t current_bgc_begin_data_size = get_total_begin_data_size (gen_number); // This is usually 0 unless a GC happened where we joined at the end of sweep size_t current_alloc = get_total_servo_alloc (gen_number); //dprintf (BGC_TUNING_LOG, ("BTL%d: current fl alloc: %Id, last recorded alloc: %Id, last_bgc_end_alloc: %Id", dprintf (BGC_TUNING_LOG, ("BTL%d: en a: %Id, la: %Id, lbgca: %Id", gen_number, current_alloc, current_gen_stats->last_alloc, current_gen_calc->last_bgc_end_alloc)); double current_bgc_surv_rate = (current_bgc_begin_data_size == 0) ? 
0 : ((double)current_bgc_surv_size * 100.0 / (double)current_bgc_begin_data_size); current_gen_stats->last_alloc_sweep_to_end = current_alloc - current_gen_stats->last_alloc; size_t gen1_index = get_current_gc_index (max_generation - 1); size_t gen2_index = get_current_gc_index (max_generation); size_t num_gen1s_since_sweep = gen1_index - gen1_index_last_bgc_sweep; size_t num_gen1s_bgc_end = gen1_index - gen1_index_last_bgc_end; size_t gen_end_size_goal = current_gen_calc->end_gen_size_goal; double gen_sweep_flr_goal = current_gen_calc->sweep_flr_goal; size_t last_gen_alloc_to_trigger = current_gen_calc->alloc_to_trigger; size_t gen_actual_alloc_to_trigger = current_gen_calc->actual_alloc_to_trigger; size_t last_gen_alloc_to_trigger_0 = current_gen_calc->alloc_to_trigger_0; double current_end_to_sweep_flr = current_gen_calc->last_bgc_flr - current_gen_calc->current_bgc_sweep_flr; bool current_sweep_above_p = (current_gen_calc->current_bgc_sweep_flr > gen_sweep_flr_goal); #ifdef SIMPLE_DPRINTF dprintf (BGC_TUNING_LOG, ("BTL%d: sflr: c %.3f (%s), p %s, palloc: %Id, aalloc %Id(%s)", gen_number, current_gen_calc->current_bgc_sweep_flr, (current_sweep_above_p ? "above" : "below"), (current_gen_calc->last_sweep_above_p ? "above" : "below"), last_gen_alloc_to_trigger, current_gen_calc->actual_alloc_to_trigger, (use_this_loop_p ? "this" : "last"))); dprintf (BGC_TUNING_LOG, ("BTL%d-en[g1: %Id, g2: %Id]: end fl: %Id (%Id: S-%Id, %.3f%%->%.3f%%)", gen_number, gen1_index, gen2_index, current_bgc_fl, total_generation_size, current_bgc_surv_size, current_gen_stats->last_bgc_surv_rate, current_bgc_surv_rate)); dprintf (BGC_TUNING_LOG, ("BTLS%d sflr: %.3f, end-start: %Id(%Id), start-sweep: %Id(%Id), sweep-end: %Id(%Id)", gen_number, current_gen_calc->current_bgc_sweep_flr, (gen1_index_last_bgc_start - gen1_index_last_bgc_end), current_gen_stats->last_alloc_end_to_start, (gen1_index_last_bgc_sweep - gen1_index_last_bgc_start), current_gen_stats->last_alloc_start_to_sweep, num_gen1s_since_sweep, current_gen_stats->last_alloc_sweep_to_end)); #endif //SIMPLE_DPRINTF size_t saved_alloc_to_trigger = 0; // during our calculation alloc can be negative so use double here. double current_alloc_to_trigger = 0.0; if (!fl_tuning_triggered && use_tbh_p) { current_gen_calc->alloc_to_trigger_0 = current_gen_calc->actual_alloc_to_trigger; dprintf (BGC_TUNING_LOG, ("BTL%d[g1: %Id]: not in FL tuning yet, setting alloc_to_trigger_0 to %Id", gen_number, gen1_index, current_gen_calc->alloc_to_trigger_0)); } if (fl_tuning_triggered) { BOOL tuning_kd_finished_p = FALSE; // We shouldn't have an alloc_to_trigger that's > what's consumed before sweep happens. double max_alloc_to_trigger = ((double)current_bgc_fl * (100 - gen_sweep_flr_goal) / 100.0); double min_alloc_to_trigger = (double)current_bgc_fl * 0.05; { if (current_gen_calc->current_bgc_sweep_flr < 0.0) { dprintf (BGC_TUNING_LOG, ("BTL%d: sflr is %.3f!!! < 0, make it 0", gen_number, current_gen_calc->current_bgc_sweep_flr)); current_gen_calc->current_bgc_sweep_flr = 0.0; } double adjusted_above_goal_kp = above_goal_kp; double above_goal_distance = current_gen_calc->current_bgc_sweep_flr - gen_sweep_flr_goal; if (use_ki_p) { if (current_gen_calc->above_goal_accu_error > max_alloc_to_trigger) { dprintf (BGC_TUNING_LOG, ("g%d: ae TB! %.1f->%.1f", gen_number, current_gen_calc->above_goal_accu_error, max_alloc_to_trigger)); } else if (current_gen_calc->above_goal_accu_error < min_alloc_to_trigger) { dprintf (BGC_TUNING_LOG, ("g%d: ae TS! 
%.1f->%.1f", gen_number, current_gen_calc->above_goal_accu_error, min_alloc_to_trigger)); } current_gen_calc->above_goal_accu_error = min (max_alloc_to_trigger, current_gen_calc->above_goal_accu_error); current_gen_calc->above_goal_accu_error = max (min_alloc_to_trigger, current_gen_calc->above_goal_accu_error); double above_goal_ki_gain = above_goal_ki * above_goal_distance * current_bgc_fl; double temp_accu_error = current_gen_calc->above_goal_accu_error + above_goal_ki_gain; // anti-windup if ((temp_accu_error > min_alloc_to_trigger) && (temp_accu_error < max_alloc_to_trigger)) { current_gen_calc->above_goal_accu_error = temp_accu_error; } else { //dprintf (BGC_TUNING_LOG, ("alloc accu err + %.1f=%.1f, exc", dprintf (BGC_TUNING_LOG, ("g%d: aae + %.1f=%.1f, exc", gen_number, above_goal_ki_gain, temp_accu_error)); } } // First we do the PI loop. { saved_alloc_to_trigger = current_gen_calc->alloc_to_trigger; current_alloc_to_trigger = adjusted_above_goal_kp * above_goal_distance * current_bgc_fl; // la is last alloc_to_trigger, +%Id is the diff between la and the new alloc. // laa is the last actual alloc (gen_actual_alloc_to_trigger), +%Id is the diff between la and laa. dprintf (BGC_TUNING_LOG, ("BTL%d: sflr %.3f above * %.4f * %Id = %Id bytes in alloc, la: %Id(+%Id), laa: %Id(+%Id)", gen_number, (current_gen_calc->current_bgc_sweep_flr - (double)gen_sweep_flr_goal), adjusted_above_goal_kp, current_bgc_fl, (size_t)current_alloc_to_trigger, saved_alloc_to_trigger, (size_t)(current_alloc_to_trigger - (double)saved_alloc_to_trigger), gen_actual_alloc_to_trigger, (gen_actual_alloc_to_trigger - saved_alloc_to_trigger))); if (use_ki_p) { current_alloc_to_trigger += current_gen_calc->above_goal_accu_error; dprintf (BGC_TUNING_LOG, ("BTL%d: +accu err %Id=%Id", gen_number, (size_t)(current_gen_calc->above_goal_accu_error), (size_t)current_alloc_to_trigger)); } } if (use_tbh_p) { if (current_gen_calc->last_sweep_above_p != current_sweep_above_p) { size_t new_alloc_to_trigger_0 = (last_gen_alloc_to_trigger + last_gen_alloc_to_trigger_0) / 2; dprintf (BGC_TUNING_LOG, ("BTL%d: tbh crossed SP, setting both to %Id", gen_number, new_alloc_to_trigger_0)); current_gen_calc->alloc_to_trigger_0 = new_alloc_to_trigger_0; current_gen_calc->alloc_to_trigger = new_alloc_to_trigger_0; } tuning_kd_finished_p = TRUE; } } if (!tuning_kd_finished_p) { if (use_kd_p) { saved_alloc_to_trigger = last_gen_alloc_to_trigger; size_t alloc_delta = saved_alloc_to_trigger - gen_actual_alloc_to_trigger; double adjust_ratio = (double)alloc_delta / (double)gen_actual_alloc_to_trigger; double saved_adjust_ratio = adjust_ratio; if (enable_gradual_d) { adjust_ratio = calculate_gradual_d (adjust_ratio, above_goal_kd); dprintf (BGC_TUNING_LOG, ("BTL%d: gradual kd - reduced from %.3f to %.3f", gen_number, saved_adjust_ratio, adjust_ratio)); } else { double kd = above_goal_kd; double neg_kd = 0 - kd; if (adjust_ratio > kd) adjust_ratio = kd; if (adjust_ratio < neg_kd) adjust_ratio = neg_kd; dprintf (BGC_TUNING_LOG, ("BTL%d: kd - reduced from %.3f to %.3f", gen_number, saved_adjust_ratio, adjust_ratio)); } current_gen_calc->alloc_to_trigger = (size_t)((double)gen_actual_alloc_to_trigger * (1 + adjust_ratio)); dprintf (BGC_TUNING_LOG, ("BTL%d: kd %.3f, reduced it to %.3f * %Id, adjust %Id->%Id", gen_number, saved_adjust_ratio, adjust_ratio, gen_actual_alloc_to_trigger, saved_alloc_to_trigger, current_gen_calc->alloc_to_trigger)); } if (use_smooth_p && use_this_loop_p) { saved_alloc_to_trigger = current_gen_calc->alloc_to_trigger; size_t 
gen_smoothed_alloc_to_trigger = current_gen_calc->smoothed_alloc_to_trigger; double current_num_gen1s_smooth_factor = (num_gen1s_smooth_factor > (double)num_bgcs_since_tuning_trigger) ? (double)num_bgcs_since_tuning_trigger : num_gen1s_smooth_factor; current_gen_calc->smoothed_alloc_to_trigger = (size_t)((double)saved_alloc_to_trigger / current_num_gen1s_smooth_factor + ((double)gen_smoothed_alloc_to_trigger / current_num_gen1s_smooth_factor) * (current_num_gen1s_smooth_factor - 1.0)); dprintf (BGC_TUNING_LOG, ("BTL%d: smoothed %Id / %.3f + %Id / %.3f * %.3f adjust %Id->%Id", gen_number, saved_alloc_to_trigger, current_num_gen1s_smooth_factor, gen_smoothed_alloc_to_trigger, current_num_gen1s_smooth_factor, (current_num_gen1s_smooth_factor - 1.0), saved_alloc_to_trigger, current_gen_calc->smoothed_alloc_to_trigger)); current_gen_calc->alloc_to_trigger = current_gen_calc->smoothed_alloc_to_trigger; } } if (use_ff_p) { double next_end_to_sweep_flr = data->gen_flr - gen_sweep_flr_goal; if (next_end_to_sweep_flr > 0.0) { saved_alloc_to_trigger = current_gen_calc->alloc_to_trigger; double ff_ratio = next_end_to_sweep_flr / current_end_to_sweep_flr - 1; if (use_this_loop_p) { // if we adjust down we want ff to be bigger, so the alloc will be even smaller; // if we adjust up want ff to be smaller, so the alloc will also be smaller; // the idea is we want to be slower at increase than decrease double ff_step = above_goal_ff * 0.5; double adjusted_above_goal_ff = above_goal_ff; if (ff_ratio > 0) adjusted_above_goal_ff -= ff_step; else adjusted_above_goal_ff += ff_step; double adjusted_ff_ratio = ff_ratio * adjusted_above_goal_ff; current_gen_calc->alloc_to_trigger = saved_alloc_to_trigger + (size_t)((double)saved_alloc_to_trigger * adjusted_ff_ratio); dprintf (BGC_TUNING_LOG, ("BTL%d: ff (%.3f / %.3f - 1) * %.3f = %.3f adjust %Id->%Id", gen_number, next_end_to_sweep_flr, current_end_to_sweep_flr, adjusted_above_goal_ff, adjusted_ff_ratio, saved_alloc_to_trigger, current_gen_calc->alloc_to_trigger)); } } } if (use_this_loop_p) { // apply low/high caps. if (current_alloc_to_trigger > max_alloc_to_trigger) { dprintf (BGC_TUNING_LOG, ("BTL%d: TB! %.1f -> %.1f", gen_number, current_alloc_to_trigger, max_alloc_to_trigger)); current_alloc_to_trigger = max_alloc_to_trigger; } if (current_alloc_to_trigger < min_alloc_to_trigger) { dprintf (BGC_TUNING_LOG, ("BTL%d: TS! %Id -> %Id", gen_number, (ptrdiff_t)current_alloc_to_trigger, (size_t)min_alloc_to_trigger)); current_alloc_to_trigger = min_alloc_to_trigger; } current_gen_calc->alloc_to_trigger = (size_t)current_alloc_to_trigger; } else { // we can't do the above comparison - we could be in the situation where // we haven't done any alloc. dprintf (BGC_TUNING_LOG, ("BTL%d: ag, revert %Id->%Id", gen_number, current_gen_calc->alloc_to_trigger, last_gen_alloc_to_trigger)); current_gen_calc->alloc_to_trigger = last_gen_alloc_to_trigger; } } // This is only executed once to get the tuning started. 
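    // ------------------------------------------------------------------------
    // Illustrative note (not part of the original sources): the alloc_to_trigger
    // computed above is essentially a PI controller on the sweep free-list ratio,
    // with anti-windup on the integral term and, in the active-loop path, the
    // output clamped to [5% of fl, (100 - sweep_flr_goal)% of fl]. A minimal
    // standalone sketch with hypothetical names and assumed gains, excluded
    // from the build:
#if 0
    {
        double fl        = 100.0 * 1024 * 1024; // assumed current BGC fl: 100MB
        double sflr      = 25.0;                // observed sweep flr, %
        double sflr_goal = 20.0;                // goal, %
        double kp        = 0.02;                // assumed proportional gain
        double ki        = 0.002;               // assumed integral gain
        double accu      = 6.0 * 1024 * 1024;   // assumed carried-over integral state: 6MB

        double max_alloc = fl * (100.0 - sflr_goal) / 100.0;  // 80MB cap
        double min_alloc = fl * 0.05;                         // 5MB floor

        double distance  = sflr - sflr_goal;                  // 5% above goal
        double p_term    = kp * distance * fl;                // 10MB
        double i_gain    = ki * distance * fl;                // 1MB
        if (((accu + i_gain) > min_alloc) && ((accu + i_gain) < max_alloc))
            accu += i_gain;                                   // anti-windup: only accumulate while in range
        double alloc_to_trigger = p_term + accu;              // ~17MB
        if (alloc_to_trigger > max_alloc) alloc_to_trigger = max_alloc;
        if (alloc_to_trigger < min_alloc) alloc_to_trigger = min_alloc;
    }
#endif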
if (next_bgc_p) { size_t first_alloc = (size_t)((double)current_gen_calc->first_alloc_to_trigger * 0.75); // The initial conditions can be quite erratic so check to see if the first alloc we set was reasonable - take 5% of the FL size_t min_first_alloc = current_bgc_fl / 20; current_gen_calc->alloc_to_trigger = max (first_alloc, min_first_alloc); dprintf (BGC_TUNING_LOG, ("BTL%d[g1: %Id]: BGC end, trigger FL, set gen%d alloc to max (0.75 of first: %Id, 5%% fl: %Id), actual alloc: %Id", gen_number, gen1_index, gen_number, first_alloc, min_first_alloc, current_gen_calc->actual_alloc_to_trigger)); } dprintf (BGC_TUNING_LOG, ("BTL%d* %Id, %.3f, %.3f, %.3f, %.3f, %.3f, %Id, %Id, %Id, %Id", gen_number, total_generation_size, current_gen_calc->current_bgc_start_flr, current_gen_calc->current_bgc_sweep_flr, current_bgc_end_data[tuning_data_index].gen_flr, current_gen_stats->last_gen_increase_flr, current_bgc_surv_rate, actual_num_gen1s_to_trigger, num_gen1s_bgc_end, gen_actual_alloc_to_trigger, current_gen_calc->alloc_to_trigger)); gen1_index_last_bgc_end = gen1_index; current_gen_calc->last_bgc_size = total_generation_size; current_gen_calc->last_bgc_flr = current_bgc_end_data[tuning_data_index].gen_flr; current_gen_calc->last_sweep_above_p = current_sweep_above_p; current_gen_calc->last_bgc_end_alloc = current_alloc; current_gen_stats->last_bgc_physical_size = data->gen_physical_size; current_gen_stats->last_alloc_end_to_start = 0; current_gen_stats->last_alloc_start_to_sweep = 0; current_gen_stats->last_alloc_sweep_to_end = 0; current_gen_stats->last_alloc = current_alloc; current_gen_stats->last_bgc_fl_size = current_bgc_end_data[tuning_data_index].gen_fl_size; current_gen_stats->last_bgc_surv_rate = current_bgc_surv_rate; current_gen_stats->last_gen_increase_flr = 0; } // Note that in this method for the !use_this_loop_p generation we will adjust // its sweep_flr accordingly. And the inner loop will not need to know about this. void gc_heap::bgc_tuning::init_bgc_end_data (int gen_number, bool use_this_loop_p) { int index = gen_number - max_generation; bgc_size_data* data = &current_bgc_end_data[index]; size_t physical_size = get_total_generation_size (gen_number); ptrdiff_t physical_fl_size = get_total_generation_fl_size (gen_number); data->gen_actual_phys_fl_size = physical_fl_size; if (fl_tuning_triggered && !use_this_loop_p) { tuning_calculation* current_gen_calc = &gen_calc[gen_number - max_generation]; if (current_gen_calc->actual_alloc_to_trigger > current_gen_calc->alloc_to_trigger) { dprintf (BGC_TUNING_LOG, ("BTL%d: gen alloc also exceeded %Id (la: %Id), no action", gen_number, current_gen_calc->actual_alloc_to_trigger, current_gen_calc->alloc_to_trigger)); } else { // We will deduct the missing portion from alloc to fl, simulating that we consumed it. size_t remaining_alloc = current_gen_calc->alloc_to_trigger - current_gen_calc->actual_alloc_to_trigger; // now re-calc current_bgc_sweep_flr // TODO: note that I am assuming the physical size at sweep was <= end_gen_size_goal which // not have been the case. size_t gen_size = current_gen_calc->end_gen_size_goal; double sweep_flr = current_gen_calc->current_bgc_sweep_flr; size_t sweep_fl_size = (size_t)((double)gen_size * sweep_flr / 100.0); if (sweep_fl_size < remaining_alloc) { dprintf (BGC_TUNING_LOG, ("BTL%d: sweep fl %Id < remain alloc %Id", gen_number, sweep_fl_size, remaining_alloc)); // TODO: this is saying that we didn't have enough fl to accommodate the // remaining alloc which is suspicious. 
To set remaining_alloc to // something slightly smaller is only so that we could continue with // our calculation but this is something we should look into. remaining_alloc = sweep_fl_size - (10 * 1024); } size_t new_sweep_fl_size = sweep_fl_size - remaining_alloc; ptrdiff_t signed_new_sweep_fl_size = sweep_fl_size - remaining_alloc; double new_current_bgc_sweep_flr = (double)new_sweep_fl_size * 100.0 / (double)gen_size; double signed_new_current_bgc_sweep_flr = (double)signed_new_sweep_fl_size * 100.0 / (double)gen_size; dprintf (BGC_TUNING_LOG, ("BTL%d: sg: %Id(%Id), sfl: %Id->%Id(%Id)(%.3f->%.3f(%.3f)), la: %Id, aa: %Id", gen_number, gen_size, physical_size, sweep_fl_size, new_sweep_fl_size, signed_new_sweep_fl_size, sweep_flr, new_current_bgc_sweep_flr, signed_new_current_bgc_sweep_flr, current_gen_calc->alloc_to_trigger, current_gen_calc->actual_alloc_to_trigger)); current_gen_calc->actual_alloc_to_trigger = current_gen_calc->alloc_to_trigger; current_gen_calc->current_bgc_sweep_flr = new_current_bgc_sweep_flr; // TODO: NOTE this is duplicated in calculate_tuning except I am not * 100.0 here. size_t current_bgc_surv_size = get_total_surv_size (gen_number); size_t current_bgc_begin_data_size = get_total_begin_data_size (gen_number); double current_bgc_surv_rate = (current_bgc_begin_data_size == 0) ? 0 : ((double)current_bgc_surv_size / (double)current_bgc_begin_data_size); size_t remaining_alloc_surv = (size_t)((double)remaining_alloc * current_bgc_surv_rate); physical_fl_size -= remaining_alloc_surv; dprintf (BGC_TUNING_LOG, ("BTL%d: asfl %Id-%Id=%Id, flr %.3f->%.3f, %.3f%% s, fl %Id-%Id->%Id", gen_number, sweep_fl_size, remaining_alloc, new_sweep_fl_size, sweep_flr, current_gen_calc->current_bgc_sweep_flr, (current_bgc_surv_rate * 100.0), (physical_fl_size + remaining_alloc_surv), remaining_alloc_surv, physical_fl_size)); } } double physical_gen_flr = (double)physical_fl_size * 100.0 / (double)physical_size; data->gen_physical_size = physical_size; data->gen_physical_fl_size = physical_fl_size; data->gen_physical_flr = physical_gen_flr; } void gc_heap::bgc_tuning::calc_end_bgc_fl (int gen_number) { int index = gen_number - max_generation; bgc_size_data* data = &current_bgc_end_data[index]; tuning_calculation* current_gen_calc = &gen_calc[gen_number - max_generation]; size_t virtual_size = current_gen_calc->end_gen_size_goal; size_t physical_size = data->gen_physical_size; ptrdiff_t physical_fl_size = data->gen_physical_fl_size; ptrdiff_t virtual_fl_size = (ptrdiff_t)virtual_size - (ptrdiff_t)physical_size; ptrdiff_t end_gen_fl_size = physical_fl_size + virtual_fl_size; if (end_gen_fl_size < 0) { end_gen_fl_size = 0; } data->gen_size = virtual_size; data->gen_fl_size = end_gen_fl_size; data->gen_flr = (double)(data->gen_fl_size) * 100.0 / (double)(data->gen_size); dprintf (BGC_TUNING_LOG, ("BTL%d: vfl: %Id, size %Id->%Id, fl %Id->%Id, flr %.3f->%.3f", gen_number, virtual_fl_size, data->gen_physical_size, data->gen_size, data->gen_physical_fl_size, data->gen_fl_size, data->gen_physical_flr, data->gen_flr)); } // reduce_p is for NGC2s. we want to reduce the ki so we don't overshoot. 
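// ----------------------------------------------------------------------------
// Illustrative note (not part of the original sources): calc_end_bgc_fl folds
// the "virtual" free list (the difference between the size goal handed down by
// the outer ml loop and the physical generation size) into the physical free
// list before computing the flr that the fl servo loop sees. A minimal worked
// sketch under assumed numbers; the helper below is hypothetical and excluded
// from the build.
#if 0
static double sketch_end_bgc_flr (size_t size_goal, size_t physical_size, ptrdiff_t physical_fl)
{
    ptrdiff_t virtual_fl = (ptrdiff_t)size_goal - (ptrdiff_t)physical_size;
    ptrdiff_t end_fl = physical_fl + virtual_fl;
    if (end_fl < 0)
        end_fl = 0;
    // e.g. goal 260MB, physical 200MB, physical fl 30MB ->
    // virtual fl 60MB, end fl 90MB, flr ~34.6%
    return (double)end_fl * 100.0 / (double)size_goal;
}
#endif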
double gc_heap::bgc_tuning::calculate_ml_tuning (uint64_t current_available_physical, bool reduce_p, ptrdiff_t* _vfl_from_kp, ptrdiff_t* _vfl_from_ki) { ptrdiff_t error = (ptrdiff_t)(current_available_physical - available_memory_goal); // This is questionable as gen0/1 and other processes are consuming memory // too size_t gen2_physical_size = current_bgc_end_data[0].gen_physical_size; size_t gen3_physical_size = current_bgc_end_data[1].gen_physical_size; double max_output = (double)(total_physical_mem - available_memory_goal - gen2_physical_size - gen3_physical_size); double error_ratio = (double)error / (double)total_physical_mem; // do we want this to contribute to the integral term? bool include_in_i_p = ((error_ratio > 0.005) || (error_ratio < -0.005)); dprintf (BGC_TUNING_LOG, ("total phy %Id, mem goal: %Id, curr phy: %Id, g2 phy: %Id, g3 phy: %Id", (size_t)total_physical_mem, (size_t)available_memory_goal, (size_t)current_available_physical, gen2_physical_size, gen3_physical_size)); dprintf (BGC_TUNING_LOG, ("BTL: Max output: %Id, ER %Id / %Id = %.3f, %s", (size_t)max_output, error, available_memory_goal, error_ratio, (include_in_i_p ? "inc" : "exc"))); if (include_in_i_p) { double error_ki = ml_ki * (double)error; double temp_accu_error = accu_error + error_ki; // anti-windup if ((temp_accu_error > 0) && (temp_accu_error < max_output)) accu_error = temp_accu_error; else { //dprintf (BGC_TUNING_LOG, ("ml accu err + %Id=%Id, exc", dprintf (BGC_TUNING_LOG, ("mae + %Id=%Id, exc", (size_t)error_ki, (size_t)temp_accu_error)); } } if (reduce_p) { double saved_accu_error = accu_error; accu_error = accu_error * 2.0 / 3.0; panic_activated_p = false; accu_error_panic = 0; dprintf (BGC_TUNING_LOG, ("BTL reduced accu ki %Id->%Id", (ptrdiff_t)saved_accu_error, (ptrdiff_t)accu_error)); } if (panic_activated_p) accu_error_panic += (double)error; else accu_error_panic = 0.0; double vfl_from_kp = (double)error * ml_kp; double total_virtual_fl_size = vfl_from_kp + accu_error; // limit output if (total_virtual_fl_size < 0) { dprintf (BGC_TUNING_LOG, ("BTL vfl %Id < 0", (size_t)total_virtual_fl_size)); total_virtual_fl_size = 0; } else if (total_virtual_fl_size > max_output) { dprintf (BGC_TUNING_LOG, ("BTL vfl %Id > max", (size_t)total_virtual_fl_size)); total_virtual_fl_size = max_output; } *_vfl_from_kp = (ptrdiff_t)vfl_from_kp; *_vfl_from_ki = (ptrdiff_t)accu_error; return total_virtual_fl_size; } void gc_heap::bgc_tuning::set_total_gen_sizes (bool use_gen2_loop_p, bool use_gen3_loop_p) { size_t gen2_physical_size = current_bgc_end_data[0].gen_physical_size; size_t gen3_physical_size = 0; ptrdiff_t gen3_virtual_fl_size = 0; gen3_physical_size = current_bgc_end_data[1].gen_physical_size; double gen2_size_ratio = (double)gen2_physical_size / ((double)gen2_physical_size + (double)gen3_physical_size); // We know how far we are from the memory load goal, assuming that the memory is only // used by gen2/3 (which is obviously not the case, but that's why we are not setting the // memory goal at 90+%. Assign the memory proportionally to them. // // We use entry memory load info because that seems to be more closedly correlated to what the VMM decides // in memory load. 
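    // Illustrative sketch (not part of the original sources): how the virtual
    // free list computed by calculate_ml_tuning is split proportionally between
    // gen2 and gen3, under assumed numbers; excluded from the build.
#if 0
    {
        double total_virtual_fl = 300.0 * 1024 * 1024;              // assumed output of the ml loop: 300MB
        double g2_phys = 200.0 * 1024 * 1024;                       // assumed gen2 physical size
        double g3_phys = 100.0 * 1024 * 1024;                       // assumed gen3 (loh) physical size
        double g2_ratio = g2_phys / (g2_phys + g3_phys);            // 2/3, before any ratio correction
        double g2_virtual_fl = total_virtual_fl * g2_ratio;         // ~200MB
        double g3_virtual_fl = total_virtual_fl * (1.0 - g2_ratio); // ~100MB
        // end_gen_size_goal = physical size + virtual fl,
        // i.e. ~400MB for gen2 and ~200MB for gen3 in this example.
    }
#endif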
uint32_t current_memory_load = settings.entry_memory_load; uint64_t current_available_physical = settings.entry_available_physical_mem; panic_activated_p = (current_memory_load >= (memory_load_goal + memory_load_goal_slack)); if (panic_activated_p) { dprintf (BGC_TUNING_LOG, ("BTL: exceeded slack %Id >= (%Id + %Id)", (size_t)current_memory_load, (size_t)memory_load_goal, (size_t)memory_load_goal_slack)); } ptrdiff_t vfl_from_kp = 0; ptrdiff_t vfl_from_ki = 0; double total_virtual_fl_size = calculate_ml_tuning (current_available_physical, false, &vfl_from_kp, &vfl_from_ki); if (use_gen2_loop_p || use_gen3_loop_p) { if (use_gen2_loop_p) { gen2_ratio_correction += ratio_correction_step; } else { gen2_ratio_correction -= ratio_correction_step; } dprintf (BGC_TUNING_LOG, ("BTL: rc: g2 ratio %.3f%% + %d%% = %.3f%%", (gen2_size_ratio * 100.0), (int)(gen2_ratio_correction * 100.0), ((gen2_size_ratio + gen2_ratio_correction) * 100.0))); gen2_ratio_correction = min (0.99, gen2_ratio_correction); gen2_ratio_correction = max (-0.99, gen2_ratio_correction); dprintf (BGC_TUNING_LOG, ("BTL: rc again: g2 ratio %.3f%% + %d%% = %.3f%%", (gen2_size_ratio * 100.0), (int)(gen2_ratio_correction * 100.0), ((gen2_size_ratio + gen2_ratio_correction) * 100.0))); gen2_size_ratio += gen2_ratio_correction; if (gen2_size_ratio <= 0.0) { gen2_size_ratio = 0.01; dprintf (BGC_TUNING_LOG, ("BTL: rc: g2 ratio->0.01")); } if (gen2_size_ratio >= 1.0) { gen2_size_ratio = 0.99; dprintf (BGC_TUNING_LOG, ("BTL: rc: g2 ratio->0.99")); } } ptrdiff_t gen2_virtual_fl_size = (ptrdiff_t)(total_virtual_fl_size * gen2_size_ratio); gen3_virtual_fl_size = (ptrdiff_t)(total_virtual_fl_size * (1.0 - gen2_size_ratio)); if (gen2_virtual_fl_size < 0) { ptrdiff_t saved_gen2_virtual_fl_size = gen2_virtual_fl_size; ptrdiff_t half_gen2_physical_size = (ptrdiff_t)((double)gen2_physical_size * 0.5); if (-gen2_virtual_fl_size > half_gen2_physical_size) { gen2_virtual_fl_size = -half_gen2_physical_size; } dprintf (BGC_TUNING_LOG, ("BTL2: n_vfl %Id(%Id)->%Id", saved_gen2_virtual_fl_size, half_gen2_physical_size, gen2_virtual_fl_size)); gen2_virtual_fl_size = 0; } if (gen3_virtual_fl_size < 0) { ptrdiff_t saved_gen3_virtual_fl_size = gen3_virtual_fl_size; ptrdiff_t half_gen3_physical_size = (ptrdiff_t)((double)gen3_physical_size * 0.5); if (-gen3_virtual_fl_size > half_gen3_physical_size) { gen3_virtual_fl_size = -half_gen3_physical_size; } dprintf (BGC_TUNING_LOG, ("BTL3: n_vfl %Id(%Id)->%Id", saved_gen3_virtual_fl_size, half_gen3_physical_size, gen3_virtual_fl_size)); gen3_virtual_fl_size = 0; } gen_calc[0].end_gen_size_goal = gen2_physical_size + gen2_virtual_fl_size; gen_calc[1].end_gen_size_goal = gen3_physical_size + gen3_virtual_fl_size; // We calculate the end info here because the ff in fl servo loop is using this. calc_end_bgc_fl (max_generation); calc_end_bgc_fl (loh_generation); #ifdef SIMPLE_DPRINTF dprintf (BGC_TUNING_LOG, ("BTL: ml: %d (g: %d)(%s), a: %I64d (g: %I64d, elg: %Id+%Id=%Id, %Id+%Id=%Id, pi=%Id), vfl: %Id=%Id+%Id", current_memory_load, memory_load_goal, ((current_available_physical > available_memory_goal) ? 
"above" : "below"), current_available_physical, available_memory_goal, gen2_physical_size, gen2_virtual_fl_size, gen_calc[0].end_gen_size_goal, gen3_physical_size, gen3_virtual_fl_size, gen_calc[1].end_gen_size_goal, (ptrdiff_t)accu_error_panic, (ptrdiff_t)total_virtual_fl_size, vfl_from_kp, vfl_from_ki)); #endif //SIMPLE_DPRINTF } bool gc_heap::bgc_tuning::should_trigger_ngc2() { return panic_activated_p; } // This is our outer ml servo loop where we calculate the control for the inner fl servo loop. void gc_heap::bgc_tuning::convert_to_fl (bool use_gen2_loop_p, bool use_gen3_loop_p) { size_t current_bgc_count = full_gc_counts[gc_type_background]; #ifdef MULTIPLE_HEAPS for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; hp->bgc_maxgen_end_fl_size = generation_free_list_space (hp->generation_of (max_generation)); } #else bgc_maxgen_end_fl_size = generation_free_list_space (generation_of (max_generation)); #endif //MULTIPLE_HEAPS init_bgc_end_data (max_generation, use_gen2_loop_p); init_bgc_end_data (loh_generation, use_gen3_loop_p); set_total_gen_sizes (use_gen2_loop_p, use_gen3_loop_p); dprintf (BGC_TUNING_LOG, ("BTL: gen2 %Id, fl %Id(%.3f)->%Id; gen3 %Id, fl %Id(%.3f)->%Id, %Id BGCs", current_bgc_end_data[0].gen_size, current_bgc_end_data[0].gen_fl_size, current_bgc_end_data[0].gen_flr, gen_calc[0].end_gen_size_goal, current_bgc_end_data[1].gen_size, current_bgc_end_data[1].gen_fl_size, current_bgc_end_data[1].gen_flr, gen_calc[1].end_gen_size_goal, current_bgc_count)); } void gc_heap::bgc_tuning::record_and_adjust_bgc_end() { if (!bgc_tuning::enable_fl_tuning) return; uint64_t elapsed_time_so_far = GetHighPrecisionTimeStamp() - process_start_time; size_t current_gen1_index = get_current_gc_index (max_generation - 1); dprintf (BGC_TUNING_LOG, ("BTL: g2t[en][g1 %Id]: %0.3f minutes", current_gen1_index, (double)elapsed_time_so_far / (double)1000000 / (double)60)); if (fl_tuning_triggered) { num_bgcs_since_tuning_trigger++; } bool use_gen2_loop_p = (settings.reason == reason_bgc_tuning_soh); bool use_gen3_loop_p = (settings.reason == reason_bgc_tuning_loh); dprintf (BGC_TUNING_LOG, ("BTL: reason: %d, gen2 loop: %s; gen3 loop: %s, promoted %Id bytes", (((settings.reason != reason_bgc_tuning_soh) && (settings.reason != reason_bgc_tuning_loh)) ? saved_bgc_tuning_reason : settings.reason), (use_gen2_loop_p ? "yes" : "no"), (use_gen3_loop_p ? "yes" : "no"), get_total_bgc_promoted())); convert_to_fl (use_gen2_loop_p, use_gen3_loop_p); calculate_tuning (max_generation, true); if (total_loh_a_last_bgc > 0) { calculate_tuning (loh_generation, true); } else { dprintf (BGC_TUNING_LOG, ("BTL: gen3 not allocated")); } if (next_bgc_p) { next_bgc_p = false; fl_tuning_triggered = true; dprintf (BGC_TUNING_LOG, ("BTL: FL tuning ENABLED!!!")); } saved_bgc_tuning_reason = -1; } #endif //BGC_SERVO_TUNING #endif //BACKGROUND_GC //Clear the cards [start_card, end_card[ void gc_heap::clear_cards (size_t start_card, size_t end_card) { if (start_card < end_card) { size_t start_word = card_word (start_card); size_t end_word = card_word (end_card); if (start_word < end_word) { // Figure out the bit positions of the cards within their words unsigned bits = card_bit (start_card); card_table [start_word] &= lowbits (~0, bits); for (size_t i = start_word+1; i < end_word; i++) card_table [i] = 0; bits = card_bit (end_card); // Don't write beyond end_card (and possibly uncommitted card table space). 
if (bits != 0) { card_table [end_word] &= highbits (~0, bits); } } else { // If the start and end cards are in the same word, just clear the appropriate card // bits in that word. card_table [start_word] &= (lowbits (~0, card_bit (start_card)) | highbits (~0, card_bit (end_card))); } #if defined(_DEBUG) && defined(VERIFY_HEAP) if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) { size_t card = start_card; while (card < end_card) { assert (!(card_set_p (card))); card++; } } #endif //_DEBUG && VERIFY_HEAP dprintf (3,("Cleared cards [%Ix:%Ix, %Ix:%Ix[", start_card, (size_t)card_address (start_card), end_card, (size_t)card_address (end_card))); } } void gc_heap::clear_card_for_addresses (uint8_t* start_address, uint8_t* end_address) { size_t start_card = card_of (align_on_card (start_address)); size_t end_card = card_of (align_lower_card (end_address)); clear_cards (start_card, end_card); } // copy [srccard, ...[ to [dst_card, end_card[ // This will set the same bit twice. Can be optimized. inline void gc_heap::copy_cards (size_t dst_card, size_t src_card, size_t end_card, BOOL nextp) { // If the range is empty, this function is a no-op - with the subtlety that // either of the accesses card_table[srcwrd] or card_table[dstwrd] could be // outside the committed region. To avoid the access, leave early. if (!(dst_card < end_card)) return; unsigned int srcbit = card_bit (src_card); unsigned int dstbit = card_bit (dst_card); size_t srcwrd = card_word (src_card); size_t dstwrd = card_word (dst_card); unsigned int srctmp = card_table[srcwrd]; unsigned int dsttmp = card_table[dstwrd]; for (size_t card = dst_card; card < end_card; card++) { if (srctmp & (1 << srcbit)) dsttmp |= 1 << dstbit; else dsttmp &= ~(1 << dstbit); if (!(++srcbit % 32)) { srctmp = card_table[++srcwrd]; srcbit = 0; } if (nextp) { if (srctmp & (1 << srcbit)) dsttmp |= 1 << dstbit; } if (!(++dstbit % 32)) { card_table[dstwrd] = dsttmp; #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES if (dsttmp != 0) { card_bundle_set(cardw_card_bundle(dstwrd)); } #endif dstwrd++; dsttmp = card_table[dstwrd]; dstbit = 0; } } card_table[dstwrd] = dsttmp; #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES if (dsttmp != 0) { card_bundle_set(cardw_card_bundle(dstwrd)); } #endif } void gc_heap::copy_cards_for_addresses (uint8_t* dest, uint8_t* src, size_t len) { ptrdiff_t relocation_distance = src - dest; size_t start_dest_card = card_of (align_on_card (dest)); size_t end_dest_card = card_of (dest + len - 1); size_t dest_card = start_dest_card; size_t src_card = card_of (card_address (dest_card)+relocation_distance); dprintf (3,("Copying cards [%Ix:%Ix->%Ix:%Ix, ", src_card, (size_t)src, dest_card, (size_t)dest)); dprintf (3,(" %Ix->%Ix:%Ix[", (size_t)src+len, end_dest_card, (size_t)dest+len)); dprintf (3, ("dest: %Ix, src: %Ix, len: %Ix, reloc: %Ix, align_on_card(dest) is %Ix", dest, src, len, relocation_distance, (align_on_card (dest)))); dprintf (3, ("start_dest_card: %Ix (address: %Ix), end_dest_card: %Ix(addr: %Ix), card_of (dest): %Ix", start_dest_card, card_address (start_dest_card), end_dest_card, card_address (end_dest_card), card_of (dest))); //First card has two boundaries if (start_dest_card != card_of (dest)) { if ((card_of (card_address (start_dest_card) + relocation_distance) <= card_of (src + len - 1))&& card_set_p (card_of (card_address (start_dest_card) + relocation_distance))) { dprintf (3, ("card_address (start_dest_card) + reloc is %Ix, card: %Ix(set), src+len-1: %Ix, card: %Ix", (card_address (start_dest_card) + 
relocation_distance), card_of (card_address (start_dest_card) + relocation_distance), (src + len - 1), card_of (src + len - 1))); dprintf (3, ("setting card: %Ix", card_of (dest))); set_card (card_of (dest)); } } if (card_set_p (card_of (src))) set_card (card_of (dest)); copy_cards (dest_card, src_card, end_dest_card, ((dest - align_lower_card (dest)) != (src - align_lower_card (src)))); //Last card has two boundaries. if ((card_of (card_address (end_dest_card) + relocation_distance) >= card_of (src)) && card_set_p (card_of (card_address (end_dest_card) + relocation_distance))) { dprintf (3, ("card_address (end_dest_card) + reloc is %Ix, card: %Ix(set), src: %Ix, card: %Ix", (card_address (end_dest_card) + relocation_distance), card_of (card_address (end_dest_card) + relocation_distance), src, card_of (src))); dprintf (3, ("setting card: %Ix", end_dest_card)); set_card (end_dest_card); } if (card_set_p (card_of (src + len - 1))) set_card (end_dest_card); #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES card_bundles_set(cardw_card_bundle(card_word(card_of(dest))), cardw_card_bundle(align_cardw_on_bundle(card_word(end_dest_card)))); #endif } #ifdef BACKGROUND_GC // this does not need the Interlocked version of mark_array_set_marked. void gc_heap::copy_mark_bits_for_addresses (uint8_t* dest, uint8_t* src, size_t len) { dprintf (3, ("Copying mark_bits for addresses [%Ix->%Ix, %Ix->%Ix[", (size_t)src, (size_t)dest, (size_t)src+len, (size_t)dest+len)); uint8_t* src_o = src; uint8_t* dest_o; uint8_t* src_end = src + len; int align_const = get_alignment_constant (TRUE); ptrdiff_t reloc = dest - src; while (src_o < src_end) { uint8_t* next_o = src_o + Align (size (src_o), align_const); if (background_object_marked (src_o, TRUE)) { dest_o = src_o + reloc; background_mark (dest_o, background_saved_lowest_address, background_saved_highest_address); dprintf (3, ("bc*%Ix*bc, b*%Ix*b", (size_t)src_o, (size_t)(dest_o))); } src_o = next_o; } } #endif //BACKGROUND_GC void gc_heap::fix_brick_to_highest (uint8_t* o, uint8_t* next_o) { size_t new_current_brick = brick_of (o); set_brick (new_current_brick, (o - brick_address (new_current_brick))); size_t b = 1 + new_current_brick; size_t limit = brick_of (next_o); //dprintf(3,(" fixing brick %Ix to point to object %Ix, till %Ix(%Ix)", dprintf(3,("b:%Ix->%Ix-%Ix", new_current_brick, (size_t)o, (size_t)next_o)); while (b < limit) { set_brick (b,(new_current_brick - b)); b++; } } // start can not be >= heap_segment_allocated for the segment. uint8_t* gc_heap::find_first_object (uint8_t* start, uint8_t* first_object) { size_t brick = brick_of (start); uint8_t* o = 0; //last_object == null -> no search shortcut needed if ((brick == brick_of (first_object) || (start <= first_object))) { o = first_object; } else { ptrdiff_t min_brick = (ptrdiff_t)brick_of (first_object); ptrdiff_t prev_brick = (ptrdiff_t)brick - 1; int brick_entry = 0; while (1) { if (prev_brick < min_brick) { break; } if ((brick_entry = get_brick_entry(prev_brick)) >= 0) { break; } assert (! ((brick_entry == 0))); prev_brick = (brick_entry + prev_brick); } o = ((prev_brick < min_brick) ? 
first_object : brick_address (prev_brick) + brick_entry - 1); assert (o <= start); } assert (Align (size (o)) >= Align (min_obj_size)); uint8_t* next_o = o + Align (size (o)); size_t curr_cl = (size_t)next_o / brick_size; size_t min_cl = (size_t)first_object / brick_size; #ifdef TRACE_GC unsigned int n_o = 1; #endif //TRACE_GC uint8_t* next_b = min (align_lower_brick (next_o) + brick_size, start+1); while (next_o <= start) { do { #ifdef TRACE_GC n_o++; #endif //TRACE_GC o = next_o; assert (Align (size (o)) >= Align (min_obj_size)); next_o = o + Align (size (o)); Prefetch (next_o); }while (next_o < next_b); if (((size_t)next_o / brick_size) != curr_cl) { if (curr_cl >= min_cl) { fix_brick_to_highest (o, next_o); } curr_cl = (size_t) next_o / brick_size; } next_b = min (align_lower_brick (next_o) + brick_size, start+1); } size_t bo = brick_of (o); //dprintf (3, ("Looked at %Id objects, fixing brick [%Ix-[%Ix", dprintf (3, ("%Id o, [%Ix-[%Ix", n_o, bo, brick)); if (bo < brick) { set_brick (bo, (o - brick_address(bo))); size_t b = 1 + bo; int x = -1; while (b < brick) { set_brick (b,x--); b++; } } return o; } #ifdef CARD_BUNDLE // Find the first non-zero card word between cardw and cardw_end. // The index of the word we find is returned in cardw. BOOL gc_heap::find_card_dword (size_t& cardw, size_t cardw_end) { dprintf (3, ("gc: %d, find_card_dword cardw: %Ix, cardw_end: %Ix", dd_collection_count (dynamic_data_of (0)), cardw, cardw_end)); if (card_bundles_enabled()) { size_t cardb = cardw_card_bundle (cardw); size_t end_cardb = cardw_card_bundle (align_cardw_on_bundle (cardw_end)); while (1) { // Find a non-zero bundle while ((cardb < end_cardb) && (card_bundle_set_p (cardb) == 0)) { cardb++; } if (cardb == end_cardb) return FALSE; uint32_t* card_word = &card_table[max(card_bundle_cardw (cardb),cardw)]; uint32_t* card_word_end = &card_table[min(card_bundle_cardw (cardb+1),cardw_end)]; while ((card_word < card_word_end) && !(*card_word)) { card_word++; } if (card_word != card_word_end) { cardw = (card_word - &card_table[0]); return TRUE; } else if ((cardw <= card_bundle_cardw (cardb)) && (card_word == &card_table [card_bundle_cardw (cardb+1)])) { // a whole bundle was explored and is empty dprintf (3, ("gc: %d, find_card_dword clear bundle: %Ix cardw:[%Ix,%Ix[", dd_collection_count (dynamic_data_of (0)), cardb, card_bundle_cardw (cardb), card_bundle_cardw (cardb+1))); card_bundle_clear (cardb); } cardb++; } } else { uint32_t* card_word = &card_table[cardw]; uint32_t* card_word_end = &card_table [cardw_end]; while (card_word < card_word_end) { if ((*card_word) != 0) { cardw = (card_word - &card_table [0]); return TRUE; } card_word++; } return FALSE; } } #endif //CARD_BUNDLE // Find cards that are set between two points in a card table. // Parameters // card_table : The card table. // card : [in/out] As input, the card to start searching from. // As output, the first card that's set. // card_word_end : The card word at which to stop looking. // end_card : [out] The last card which is set. 
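// Illustrative usage (not part of the original sources): callers drive find_card
// in a loop to enumerate runs of consecutive set cards; on success, [card, end_card)
// is a run of set cards and scanning can resume at end_card. A sketch, assuming
// the routine is reachable from here; excluded from the build:
#if 0
static void sketch_walk_set_card_runs (uint32_t* ct, size_t card_word_end)
{
    size_t card = 0;
    size_t end_card = 0;
    while (gc_heap::find_card (ct, card, card_word_end, end_card))
    {
        // process the addresses covered by cards [card, end_card), then
        // continue the scan from the first card past this run.
        card = end_card;
    }
}
#endif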
BOOL gc_heap::find_card(uint32_t* card_table, size_t& card, size_t card_word_end, size_t& end_card) { uint32_t* last_card_word; uint32_t card_word_value; uint32_t bit_position; if (card_word (card) >= card_word_end) return FALSE; // Find the first card which is set last_card_word = &card_table [card_word (card)]; bit_position = card_bit (card); card_word_value = (*last_card_word) >> bit_position; if (!card_word_value) { bit_position = 0; #ifdef CARD_BUNDLE // Using the card bundle, go through the remaining card words between here and // card_word_end until we find one that is non-zero. size_t lcw = card_word(card) + 1; if (gc_heap::find_card_dword (lcw, card_word_end) == FALSE) { return FALSE; } else { last_card_word = &card_table [lcw]; card_word_value = *last_card_word; } #else //CARD_BUNDLE // Go through the remaining card words between here and card_word_end until we find // one that is non-zero. do { ++last_card_word; } while ((last_card_word < &card_table [card_word_end]) && !(*last_card_word)); if (last_card_word < &card_table [card_word_end]) { card_word_value = *last_card_word; } else { // We failed to find any non-zero card words before we got to card_word_end return FALSE; } #endif //CARD_BUNDLE } // Look for the lowest bit set if (card_word_value) { while (!(card_word_value & 1)) { bit_position++; card_word_value = card_word_value / 2; } } // card is the card word index * card size + the bit index within the card card = (last_card_word - &card_table[0]) * card_word_width + bit_position; do { // Keep going until we get to an un-set card. bit_position++; card_word_value = card_word_value / 2; // If we reach the end of the card word and haven't hit a 0 yet, start going // card word by card word until we get to one that's not fully set (0xFFFF...) // or we reach card_word_end. if ((bit_position == card_word_width) && (last_card_word < &card_table [card_word_end-1])) { do { card_word_value = *(++last_card_word); } while ((last_card_word < &card_table [card_word_end-1]) && (card_word_value == ~0u /* (1 << card_word_width)-1 */)); bit_position = 0; } } while (card_word_value & 1); end_card = (last_card_word - &card_table [0])* card_word_width + bit_position; //dprintf (3, ("find_card: [%Ix, %Ix[ set", card, end_card)); dprintf (3, ("fc: [%Ix, %Ix[", card, end_card)); return TRUE; } //because of heap expansion, computing end is complicated. uint8_t* compute_next_end (heap_segment* seg, uint8_t* low) { if ((low >= heap_segment_mem (seg)) && (low < heap_segment_allocated (seg))) return low; else return heap_segment_allocated (seg); } #ifndef USE_REGIONS uint8_t* gc_heap::compute_next_boundary (int gen_number, BOOL relocating) { //when relocating, the fault line is the plan start of the younger //generation because the generation is promoted. if (relocating && (gen_number == (settings.condemned_generation + 1))) { generation* gen = generation_of (gen_number - 1); uint8_t* gen_alloc = generation_plan_allocation_start (gen); assert (gen_alloc); return gen_alloc; } else { assert (gen_number > settings.condemned_generation); return generation_allocation_start (generation_of (gen_number - 1 )); } } #endif //!USE_REGIONS // For regions - // n_gen means it's pointing into the condemned regions so it's incremented // if the child object's region is <= condemned_gen. // cg_pointers_found means it's pointing into a lower generation so it's incremented // if the child object's region is < current_gen. 
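// Illustrative sketch (not part of the original sources) of the counting rule
// described above for the regions case, with hypothetical inputs; excluded
// from the build:
#if 0
static void sketch_count_child (int child_gen, int condemned_gen, int current_gen,
                                size_t& n_gen, size_t& cg_pointers_found)
{
    if (child_gen <= condemned_gen)
        n_gen++;              // child lives in a condemned region -> useful cross-gen ref
    if (child_gen < current_gen)
        cg_pointers_found++;  // child is younger than the parent's generation -> keep the card
}
#endif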
inline void gc_heap::mark_through_cards_helper (uint8_t** poo, size_t& n_gen, size_t& cg_pointers_found, card_fn fn, uint8_t* nhigh, uint8_t* next_boundary, int condemned_gen, // generation of the parent object int current_gen CARD_MARKING_STEALING_ARG(gc_heap* hpt)) { #if defined(FEATURE_CARD_MARKING_STEALING) && defined(MULTIPLE_HEAPS) int thread = hpt->heap_number; #else THREAD_FROM_HEAP; #ifdef MULTIPLE_HEAPS gc_heap* hpt = this; #endif //MULTIPLE_HEAPS #endif //FEATURE_CARD_MARKING_STEALING && MULTIPLE_HEAPS #ifdef USE_REGIONS assert (nhigh == 0); assert (next_boundary == 0); uint8_t* child_object = *poo; if (!is_in_heap_range (child_object)) return; int child_object_gen = get_region_gen_num (child_object); int saved_child_object_gen = child_object_gen; uint8_t* saved_child_object = child_object; if (child_object_gen <= condemned_gen) { n_gen++; call_fn(hpt,fn) (poo THREAD_NUMBER_ARG); } if (fn == &gc_heap::relocate_address) { child_object_gen = get_region_plan_gen_num (*poo); } if (child_object_gen < current_gen) { cg_pointers_found++; dprintf (4, ("cg pointer %Ix found, %Id so far", (size_t)*poo, cg_pointers_found )); } #else //USE_REGIONS assert (condemned_gen == -1); if ((gc_low <= *poo) && (gc_high > *poo)) { n_gen++; call_fn(hpt,fn) (poo THREAD_NUMBER_ARG); } #ifdef MULTIPLE_HEAPS else if (*poo) { gc_heap* hp = heap_of_gc (*poo); if (hp != this) { if ((hp->gc_low <= *poo) && (hp->gc_high > *poo)) { n_gen++; call_fn(hpt,fn) (poo THREAD_NUMBER_ARG); } if ((fn == &gc_heap::relocate_address) || ((hp->ephemeral_low <= *poo) && (hp->ephemeral_high > *poo))) { cg_pointers_found++; } } } #endif //MULTIPLE_HEAPS if ((next_boundary <= *poo) && (nhigh > *poo)) { cg_pointers_found ++; dprintf (4, ("cg pointer %Ix found, %Id so far", (size_t)*poo, cg_pointers_found )); } #endif //USE_REGIONS } BOOL gc_heap::card_transition (uint8_t* po, uint8_t* end, size_t card_word_end, size_t& cg_pointers_found, size_t& n_eph, size_t& n_card_set, size_t& card, size_t& end_card, BOOL& foundp, uint8_t*& start_address, uint8_t*& limit, size_t& n_cards_cleared CARD_MARKING_STEALING_ARGS(card_marking_enumerator& card_mark_enumerator, heap_segment* seg, size_t &card_word_end_out)) { dprintf (3, ("pointer %Ix past card %Ix, cg %Id", (size_t)po, (size_t)card, cg_pointers_found)); BOOL passed_end_card_p = FALSE; foundp = FALSE; if (cg_pointers_found == 0) { //dprintf(3,(" Clearing cards [%Ix, %Ix[ ", dprintf(3,(" CC [%Ix, %Ix[ ", (size_t)card_address(card), (size_t)po)); clear_cards (card, card_of(po)); n_card_set -= (card_of (po) - card); n_cards_cleared += (card_of (po) - card); } n_eph +=cg_pointers_found; cg_pointers_found = 0; card = card_of (po); if (card >= end_card) { passed_end_card_p = TRUE; dprintf (3, ("card %Ix exceeding end_card %Ix", (size_t)card, (size_t)end_card)); foundp = find_card (card_table, card, card_word_end, end_card); if (foundp) { n_card_set+= end_card - card; start_address = card_address (card); dprintf (3, ("NewC: %Ix, start: %Ix, end: %Ix", (size_t)card, (size_t)start_address, (size_t)card_address (end_card))); } limit = min (end, card_address (end_card)); #ifdef FEATURE_CARD_MARKING_STEALING // the card bit @ end_card should not be set // if end_card is still shy of the limit set by card_word_end assert(!((card_word(end_card) < card_word_end) && card_set_p(end_card))); if (!foundp) { card_word_end_out = 0; foundp = find_next_chunk(card_mark_enumerator, seg, n_card_set, start_address, limit, card, end_card, card_word_end_out); } #else // the card bit @ end_card should not be set - // 
find_card is supposed to terminate only when it finds a 0 bit // or the end of the segment assert (!((limit < end) && card_set_p (end_card))); #endif } return passed_end_card_p; } #ifdef FEATURE_CARD_MARKING_STEALING bool card_marking_enumerator::move_next(heap_segment* seg, uint8_t*& low, uint8_t*& high) { if (segment == nullptr) return false; uint32_t chunk_index = old_chunk_index; old_chunk_index = INVALID_CHUNK_INDEX; if (chunk_index == INVALID_CHUNK_INDEX) chunk_index = Interlocked::Increment((volatile int32_t *)chunk_index_counter); while (true) { uint32_t chunk_index_within_seg = chunk_index - segment_start_chunk_index; uint8_t* start = heap_segment_mem(segment); uint8_t* end = compute_next_end(segment, gc_low); uint8_t* aligned_start = (uint8_t*)((size_t)start & ~(CARD_MARKING_STEALING_GRANULARITY - 1)); size_t seg_size = end - aligned_start; uint32_t chunk_count_within_seg = (uint32_t)((seg_size + (CARD_MARKING_STEALING_GRANULARITY - 1)) / CARD_MARKING_STEALING_GRANULARITY); if (chunk_index_within_seg < chunk_count_within_seg) { if (seg == segment) { low = (chunk_index_within_seg == 0) ? start : (aligned_start + (size_t)chunk_index_within_seg * CARD_MARKING_STEALING_GRANULARITY); high = (chunk_index_within_seg + 1 == chunk_count_within_seg) ? end : (aligned_start + (size_t)(chunk_index_within_seg + 1) * CARD_MARKING_STEALING_GRANULARITY); chunk_high = high; dprintf (3, ("cme:mn ci: %u, low: %Ix, high: %Ix", chunk_index, low, high)); return true; } else { // we found the correct segment, but it's not the segment our caller is in // our caller should still be in one of the previous segments #ifdef _DEBUG for (heap_segment* cur_seg = seg; cur_seg != segment; cur_seg = heap_segment_next_in_range(cur_seg)) { assert(cur_seg); } #endif //_DEBUG // keep the chunk index for later old_chunk_index = chunk_index; dprintf (3, ("cme:mn oci: %u, seg mismatch seg: %Ix, segment: %Ix", old_chunk_index, heap_segment_mem (segment), heap_segment_mem (seg))); return false; } } segment = heap_segment_next_in_range(segment); segment_start_chunk_index += chunk_count_within_seg; if (segment == nullptr) { // keep the chunk index for later old_chunk_index = chunk_index; dprintf (3, ("cme:mn oci: %u no more segments", old_chunk_index)); return false; } } } bool gc_heap::find_next_chunk(card_marking_enumerator& card_mark_enumerator, heap_segment* seg, size_t& n_card_set, uint8_t*& start_address, uint8_t*& limit, size_t& card, size_t& end_card, size_t& card_word_end) { while (true) { if (card_word_end != 0 && find_card(card_table, card, card_word_end, end_card)) { assert(end_card <= card_word_end * card_word_width); n_card_set += end_card - card; start_address = card_address(card); dprintf(3, ("NewC: %Ix, start: %Ix, end: %Ix", (size_t)card, (size_t)start_address, (size_t)card_address(end_card))); limit = min(card_mark_enumerator.get_chunk_high(), card_address(end_card)); dprintf (3, ("New run of cards on heap %d: [%Ix,%Ix[", heap_number, (size_t)start_address, (size_t)limit)); return true; } // we have exhausted this chunk, get the next one uint8_t* chunk_low = nullptr; uint8_t* chunk_high = nullptr; if (!card_mark_enumerator.move_next(seg, chunk_low, chunk_high)) { dprintf (3, ("No more chunks on heap %d\n", heap_number)); return false; } card = max(card, card_of(chunk_low)); card_word_end = (card_of(align_on_card_word(chunk_high)) / card_word_width); dprintf (3, ("Moved to next chunk on heap %d: [%Ix,%Ix[", heap_number, (size_t)chunk_low, (size_t)chunk_high)); } } #endif // FEATURE_CARD_MARKING_STEALING void 
gc_heap::mark_through_cards_for_segments (card_fn fn, BOOL relocating CARD_MARKING_STEALING_ARG(gc_heap* hpt)) { #ifdef BACKGROUND_GC #ifdef USE_REGIONS dprintf (3, ("current_sweep_pos is %Ix", current_sweep_pos)); #else dprintf (3, ("current_sweep_pos is %Ix, saved_sweep_ephemeral_seg is %Ix(%Ix)", current_sweep_pos, saved_sweep_ephemeral_seg, saved_sweep_ephemeral_start)); #endif //USE_REGIONS for (int i = get_start_generation_index(); i < max_generation; i++) { heap_segment* soh_seg = heap_segment_rw (generation_start_segment (generation_of (i))); PREFIX_ASSUME(soh_seg != NULL); while (soh_seg) { dprintf (3, ("seg %Ix, bgc_alloc: %Ix, alloc: %Ix", soh_seg, heap_segment_background_allocated (soh_seg), heap_segment_allocated (soh_seg))); soh_seg = heap_segment_next_rw (soh_seg); } } #endif //BACKGROUND_GC size_t end_card = 0; generation* oldest_gen = generation_of (max_generation); int curr_gen_number = max_generation; // Note - condemned_gen is only needed for regions and the other 2 are // only for if USE_REGIONS is not defined, but I need to pass them to a // function inside the macro below so just assert they are the unused values. #ifdef USE_REGIONS uint8_t* low = 0; uint8_t* gen_boundary = 0; uint8_t* next_boundary = 0; int condemned_gen = settings.condemned_generation; uint8_t* nhigh = 0; #else uint8_t* low = gc_low; uint8_t* high = gc_high; uint8_t* gen_boundary = generation_allocation_start(generation_of(curr_gen_number - 1)); uint8_t* next_boundary = compute_next_boundary(curr_gen_number, relocating); int condemned_gen = -1; uint8_t* nhigh = (relocating ? heap_segment_plan_allocated (ephemeral_heap_segment) : high); #endif //USE_REGIONS heap_segment* seg = heap_segment_rw (generation_start_segment (oldest_gen)); PREFIX_ASSUME(seg != NULL); uint8_t* beg = get_soh_start_object (seg, oldest_gen); uint8_t* end = compute_next_end (seg, low); uint8_t* last_object = beg; size_t cg_pointers_found = 0; size_t card_word_end = (card_of (align_on_card_word (end)) / card_word_width); size_t n_eph = 0; size_t n_gen = 0; size_t n_card_set = 0; BOOL foundp = FALSE; uint8_t* start_address = 0; uint8_t* limit = 0; size_t card = card_of (beg); #ifdef BACKGROUND_GC BOOL consider_bgc_mark_p = FALSE; BOOL check_current_sweep_p = FALSE; BOOL check_saved_sweep_p = FALSE; should_check_bgc_mark (seg, &consider_bgc_mark_p, &check_current_sweep_p, &check_saved_sweep_p); #endif //BACKGROUND_GC dprintf(3, ("CMs: %Ix->%Ix", (size_t)beg, (size_t)end)); size_t total_cards_cleared = 0; #ifdef FEATURE_CARD_MARKING_STEALING card_marking_enumerator card_mark_enumerator (seg, low, (VOLATILE(uint32_t)*)&card_mark_chunk_index_soh); card_word_end = 0; #endif // FEATURE_CARD_MARKING_STEALING while (1) { if (card_of(last_object) > card) { dprintf (3, ("Found %Id cg pointers", cg_pointers_found)); if (cg_pointers_found == 0) { uint8_t* last_object_processed = last_object; #ifdef FEATURE_CARD_MARKING_STEALING last_object_processed = min(limit, last_object); #endif // FEATURE_CARD_MARKING_STEALING dprintf (3, (" Clearing cards [%Ix, %Ix[ ", (size_t)card_address(card), (size_t)last_object_processed)); clear_cards(card, card_of(last_object_processed)); n_card_set -= (card_of(last_object_processed) - card); total_cards_cleared += (card_of(last_object_processed) - card); } n_eph += cg_pointers_found; cg_pointers_found = 0; card = card_of (last_object); } if (card >= end_card) { #ifdef FEATURE_CARD_MARKING_STEALING // find another chunk with some cards set foundp = find_next_chunk(card_mark_enumerator, seg, n_card_set, 
start_address, limit, card, end_card, card_word_end); #else // FEATURE_CARD_MARKING_STEALING foundp = find_card(card_table, card, card_word_end, end_card); if (foundp) { n_card_set += end_card - card; start_address = max (beg, card_address (card)); } limit = min (end, card_address (end_card)); #endif // FEATURE_CARD_MARKING_STEALING } if (!foundp || (last_object >= end) || (card_address (card) >= end)) { if (foundp && (cg_pointers_found == 0)) { #ifndef USE_REGIONS // in the segment case, need to recompute end_card so we don't clear cards // for the next generation end_card = card_of (end); #endif dprintf(3,(" Clearing cards [%Ix, %Ix[ ", (size_t)card_address(card), (size_t)card_address(end_card))); clear_cards (card, end_card); n_card_set -= (end_card - card); total_cards_cleared += (end_card - card); } n_eph += cg_pointers_found; cg_pointers_found = 0; #ifdef FEATURE_CARD_MARKING_STEALING // we have decided to move to the next segment - make sure we exhaust the chunk enumerator for this segment card_mark_enumerator.exhaust_segment(seg); #endif // FEATURE_CARD_MARKING_STEALING seg = heap_segment_next_in_range (seg); #ifdef USE_REGIONS if (!seg) { curr_gen_number--; if (curr_gen_number > condemned_gen) { // Switch to regions for this generation. seg = generation_start_segment (generation_of (curr_gen_number)); #ifdef FEATURE_CARD_MARKING_STEALING card_mark_enumerator.switch_to_segment(seg); #endif // FEATURE_CARD_MARKING_STEALING dprintf (REGIONS_LOG, ("h%d switching to gen%d start seg %Ix", heap_number, curr_gen_number, (size_t)seg)); } } #endif //USE_REGIONS if (seg) { #ifdef BACKGROUND_GC should_check_bgc_mark (seg, &consider_bgc_mark_p, &check_current_sweep_p, &check_saved_sweep_p); #endif //BACKGROUND_GC beg = heap_segment_mem (seg); #ifdef USE_REGIONS end = heap_segment_allocated (seg); #else end = compute_next_end (seg, low); #endif //USE_REGIONS #ifdef FEATURE_CARD_MARKING_STEALING card_word_end = 0; #else // FEATURE_CARD_MARKING_STEALING card_word_end = card_of (align_on_card_word (end)) / card_word_width; #endif // FEATURE_CARD_MARKING_STEALING card = card_of (beg); last_object = beg; end_card = 0; continue; } else { break; } } assert (card_set_p (card)); { uint8_t* o = last_object; o = find_first_object (start_address, last_object); // Never visit an object twice. 
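            // Illustrative note (not part of the original sources): start_address is the
            // address of the first set card in the run, but the object that covers it may
            // begin before it (e.g. with, say, 256-byte cards, a set card at 0x1000 can be
            // covered by an object starting at 0xFE0). find_first_object walks the brick
            // table back to that object, and it never returns anything earlier than
            // last_object, so objects already scanned for a previous card are not revisited
            // (hence the assert below).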
assert (o >= last_object); #ifndef USE_REGIONS //dprintf(3,("Considering card %Ix start object: %Ix, %Ix[ boundary: %Ix", dprintf(3, ("c: %Ix, o: %Ix, l: %Ix[ boundary: %Ix", card, (size_t)o, (size_t)limit, (size_t)gen_boundary)); #endif //USE_REGIONS while (o < limit) { assert (Align (size (o)) >= Align (min_obj_size)); size_t s = size (o); // next_o is the next object in the heap walk uint8_t* next_o = o + Align (s); // while cont_o is the object we should continue with at the end_object label uint8_t* cont_o = next_o; Prefetch (next_o); #ifndef USE_REGIONS if ((o >= gen_boundary) && (seg == ephemeral_heap_segment)) { dprintf (3, ("switching gen boundary %Ix", (size_t)gen_boundary)); curr_gen_number--; assert ((curr_gen_number > 0)); gen_boundary = generation_allocation_start (generation_of (curr_gen_number - 1)); next_boundary = (compute_next_boundary (curr_gen_number, relocating)); } #endif //!USE_REGIONS dprintf (4, ("|%Ix|", (size_t)o)); if (next_o < start_address) { goto end_object; } #ifdef BACKGROUND_GC if (!fgc_should_consider_object (o, seg, consider_bgc_mark_p, check_current_sweep_p, check_saved_sweep_p)) { goto end_object; } #endif //BACKGROUND_GC #ifdef COLLECTIBLE_CLASS if (is_collectible(o)) { BOOL passed_end_card_p = FALSE; if (card_of (o) > card) { passed_end_card_p = card_transition (o, end, card_word_end, cg_pointers_found, n_eph, n_card_set, card, end_card, foundp, start_address, limit, total_cards_cleared CARD_MARKING_STEALING_ARGS(card_mark_enumerator, seg, card_word_end)); } if ((!passed_end_card_p || foundp) && (card_of (o) == card)) { // card is valid and it covers the head of the object if (fn == &gc_heap::relocate_address) { cg_pointers_found++; } else { uint8_t* class_obj = get_class_object (o); mark_through_cards_helper (&class_obj, n_gen, cg_pointers_found, fn, nhigh, next_boundary, condemned_gen, curr_gen_number CARD_MARKING_STEALING_ARG(hpt)); } } if (passed_end_card_p) { if (foundp && (card_address (card) < next_o)) { goto go_through_refs; } else if (foundp && (start_address < limit)) { cont_o = find_first_object (start_address, o); goto end_object; } else goto end_limit; } } go_through_refs: #endif //COLLECTIBLE_CLASS if (contain_pointers (o)) { dprintf(3,("Going through %Ix start_address: %Ix", (size_t)o, (size_t)start_address)); { dprintf (4, ("normal object path")); go_through_object (method_table(o), o, s, poo, start_address, use_start, (o + s), { dprintf (4, ("<%Ix>:%Ix", (size_t)poo, (size_t)*poo)); if (card_of ((uint8_t*)poo) > card) { BOOL passed_end_card_p = card_transition ((uint8_t*)poo, end, card_word_end, cg_pointers_found, n_eph, n_card_set, card, end_card, foundp, start_address, limit, total_cards_cleared CARD_MARKING_STEALING_ARGS(card_mark_enumerator, seg, card_word_end)); if (passed_end_card_p) { if (foundp && (card_address (card) < next_o)) { //new_start(); { if (ppstop <= (uint8_t**)start_address) {break;} else if (poo < (uint8_t**)start_address) {poo = (uint8_t**)start_address;} } } else if (foundp && (start_address < limit)) { cont_o = find_first_object (start_address, o); goto end_object; } else goto end_limit; } } mark_through_cards_helper (poo, n_gen, cg_pointers_found, fn, nhigh, next_boundary, condemned_gen, curr_gen_number CARD_MARKING_STEALING_ARG(hpt)); } ); } } end_object: if (((size_t)next_o / brick_size) != ((size_t) o / brick_size)) { if (brick_table [brick_of (o)] <0) fix_brick_to_highest (o, next_o); } o = cont_o; } end_limit: last_object = o; } } // compute the efficiency ratio of the card table if (!relocating) { 
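        // Illustrative note (not part of the original sources): the ratio computed
        // below is the percentage of cross-generational candidates (n_eph) that
        // turned out to be useful pointers into the condemned range (n_gen); in the
        // non-stealing path it is left at 100 when there are too few samples
        // (n_eph <= MIN_SOH_CROSS_GEN_REFS). A tiny worked example, excluded from
        // the build:
#if 0
        size_t sample_n_eph = 1000, sample_n_gen = 50;                               // hypothetical counts
        int sample_ratio = (int)(((float)sample_n_gen / (float)sample_n_eph) * 100); // == 5
#endif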
#ifdef FEATURE_CARD_MARKING_STEALING Interlocked::ExchangeAddPtr(&n_eph_soh, n_eph); Interlocked::ExchangeAddPtr(&n_gen_soh, n_gen); dprintf (3, ("h%d marking h%d Msoh: cross: %Id, useful: %Id, cards set: %Id, cards cleared: %Id, ratio: %d", hpt->heap_number, heap_number, n_eph, n_gen, n_card_set, total_cards_cleared, (n_eph ? (int)(((float)n_gen / (float)n_eph) * 100) : 0))); dprintf (3, ("h%d marking h%d Msoh: total cross %Id, useful: %Id, running ratio: %d", hpt->heap_number, heap_number, (size_t)n_eph_soh, (size_t)n_gen_soh, (n_eph_soh ? (int)(((float)n_gen_soh / (float)n_eph_soh) * 100) : 0))); #else generation_skip_ratio = ((n_eph > MIN_SOH_CROSS_GEN_REFS) ? (int)(((float)n_gen / (float)n_eph) * 100) : 100); dprintf (3, ("marking h%d Msoh: cross: %Id, useful: %Id, cards set: %Id, cards cleared: %Id, ratio: %d", heap_number, n_eph, n_gen, n_card_set, total_cards_cleared, generation_skip_ratio)); #endif //FEATURE_CARD_MARKING_STEALING } else { dprintf (3, ("R: Msoh: cross: %Id, useful: %Id, cards set: %Id, cards cleared: %Id, ratio: %d", n_gen, n_eph, n_card_set, total_cards_cleared, generation_skip_ratio)); } } #ifndef USE_REGIONS #ifdef SEG_REUSE_STATS size_t gc_heap::dump_buckets (size_t* ordered_indices, int count, size_t* total_size) { size_t total_items = 0; *total_size = 0; for (int i = 0; i < count; i++) { total_items += ordered_indices[i]; *total_size += ordered_indices[i] << (MIN_INDEX_POWER2 + i); dprintf (SEG_REUSE_LOG_0, ("[%d]%4d 2^%2d", heap_number, ordered_indices[i], (MIN_INDEX_POWER2 + i))); } dprintf (SEG_REUSE_LOG_0, ("[%d]Total %d items, total size is 0x%Ix", heap_number, total_items, *total_size)); return total_items; } #endif // SEG_REUSE_STATS void gc_heap::count_plug (size_t last_plug_size, uint8_t*& last_plug) { // detect pinned plugs if (!pinned_plug_que_empty_p() && (last_plug == pinned_plug (oldest_pin()))) { deque_pinned_plug(); update_oldest_pinned_plug(); dprintf (3, ("deque pin,now oldest pin is %Ix", pinned_plug (oldest_pin()))); } else { size_t plug_size = last_plug_size + Align(min_obj_size); BOOL is_padded = FALSE; #ifdef SHORT_PLUGS plug_size += Align (min_obj_size); is_padded = TRUE; #endif //SHORT_PLUGS #ifdef RESPECT_LARGE_ALIGNMENT plug_size += switch_alignment_size (is_padded); #endif //RESPECT_LARGE_ALIGNMENT total_ephemeral_plugs += plug_size; size_t plug_size_power2 = round_up_power2 (plug_size); ordered_plug_indices[relative_index_power2_plug (plug_size_power2)]++; dprintf (SEG_REUSE_LOG_1, ("[%d]count_plug: adding 0x%Ix - %Id (2^%d) to ordered plug array", heap_number, last_plug, plug_size, (relative_index_power2_plug (plug_size_power2) + MIN_INDEX_POWER2))); } } void gc_heap::count_plugs_in_brick (uint8_t* tree, uint8_t*& last_plug) { assert ((tree != NULL)); if (node_left_child (tree)) { count_plugs_in_brick (tree + node_left_child (tree), last_plug); } if (last_plug != 0) { uint8_t* plug = tree; size_t gap_size = node_gap_size (plug); uint8_t* gap = (plug - gap_size); uint8_t* last_plug_end = gap; size_t last_plug_size = (last_plug_end - last_plug); dprintf (3, ("tree: %Ix, last plug: %Ix, gap size: %Ix, gap: %Ix, last plug size: %Ix", tree, last_plug, gap_size, gap, last_plug_size)); if (tree == oldest_pinned_plug) { dprintf (3, ("tree %Ix is pinned, last plug is %Ix, size is %Ix", tree, last_plug, last_plug_size)); mark* m = oldest_pin(); if (m->has_pre_plug_info()) { last_plug_size += sizeof (gap_reloc_pair); dprintf (3, ("pin %Ix has pre plug, adjusting plug size to %Ix", tree, last_plug_size)); } } // Can't assert here - if it's a 
pinned plug it can be less. //assert (last_plug_size >= Align (min_obj_size)); count_plug (last_plug_size, last_plug); } last_plug = tree; if (node_right_child (tree)) { count_plugs_in_brick (tree + node_right_child (tree), last_plug); } } void gc_heap::build_ordered_plug_indices () { memset (ordered_plug_indices, 0, sizeof(ordered_plug_indices)); memset (saved_ordered_plug_indices, 0, sizeof(saved_ordered_plug_indices)); uint8_t* start_address = generation_limit (max_generation); uint8_t* end_address = heap_segment_allocated (ephemeral_heap_segment); size_t current_brick = brick_of (start_address); size_t end_brick = brick_of (end_address - 1); uint8_t* last_plug = 0; //Look for the right pinned plug to start from. reset_pinned_queue_bos(); while (!pinned_plug_que_empty_p()) { mark* m = oldest_pin(); if ((m->first >= start_address) && (m->first < end_address)) { dprintf (3, ("found a pin %Ix between %Ix and %Ix", m->first, start_address, end_address)); break; } else deque_pinned_plug(); } update_oldest_pinned_plug(); while (current_brick <= end_brick) { int brick_entry = brick_table [ current_brick ]; if (brick_entry >= 0) { count_plugs_in_brick (brick_address (current_brick) + brick_entry -1, last_plug); } current_brick++; } if (last_plug !=0) { count_plug (end_address - last_plug, last_plug); } // we need to make sure that after fitting all the existing plugs, we // have big enough free space left to guarantee that the next allocation // will succeed. size_t extra_size = END_SPACE_AFTER_GC_FL; total_ephemeral_plugs += extra_size; dprintf (SEG_REUSE_LOG_0, ("Making sure we can fit a large object after fitting all plugs")); ordered_plug_indices[relative_index_power2_plug (round_up_power2 (extra_size))]++; memcpy (saved_ordered_plug_indices, ordered_plug_indices, sizeof(ordered_plug_indices)); #ifdef SEG_REUSE_STATS dprintf (SEG_REUSE_LOG_0, ("Plugs:")); size_t total_plug_power2 = 0; dump_buckets (ordered_plug_indices, MAX_NUM_BUCKETS, &total_plug_power2); dprintf (SEG_REUSE_LOG_0, ("plugs: 0x%Ix (rounded up to 0x%Ix (%d%%))", total_ephemeral_plugs, total_plug_power2, (total_ephemeral_plugs ? (total_plug_power2 * 100 / total_ephemeral_plugs) : 0))); dprintf (SEG_REUSE_LOG_0, ("-------------------")); #endif // SEG_REUSE_STATS } void gc_heap::init_ordered_free_space_indices () { memset (ordered_free_space_indices, 0, sizeof(ordered_free_space_indices)); memset (saved_ordered_free_space_indices, 0, sizeof(saved_ordered_free_space_indices)); } void gc_heap::trim_free_spaces_indices () { trimmed_free_space_index = -1; size_t max_count = max_free_space_items - 1; size_t count = 0; int i = 0; for (i = (MAX_NUM_BUCKETS - 1); i >= 0; i--) { count += ordered_free_space_indices[i]; if (count >= max_count) { break; } } ptrdiff_t extra_free_space_items = count - max_count; if (extra_free_space_items > 0) { ordered_free_space_indices[i] -= extra_free_space_items; free_space_items = max_count; trimmed_free_space_index = i; } else { free_space_items = count; } if (i == -1) { i = 0; } free_space_buckets = MAX_NUM_BUCKETS - i; for (--i; i >= 0; i--) { ordered_free_space_indices[i] = 0; } memcpy (saved_ordered_free_space_indices, ordered_free_space_indices, sizeof(ordered_free_space_indices)); } // We fit as many plugs as we can and update the number of plugs left and the number // of free spaces left. 
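// Both arrays are bucketed by power-of-two size (bucket i holds items of size
// 2^(i + MIN_INDEX_POWER2)). A rough worked example of the math below, with
// illustrative numbers only: 3 free spaces in the 2^10 bucket can hold
// big_to_small = 3 << (10 - 8) = 12 plugs from the 2^8 bucket; if only 5 such plugs
// exist, the 7 leftover 2^8-sized units are handed back to the space buckets in binary
// form by the extra_small_spaces >>= 1 loop (one 2^8, one 2^9 and one 2^10 space), so
// smaller plugs can still be fitted into them.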
BOOL gc_heap::can_fit_in_spaces_p (size_t* ordered_blocks, int small_index, size_t* ordered_spaces, int big_index) { assert (small_index <= big_index); assert (big_index < MAX_NUM_BUCKETS); size_t small_blocks = ordered_blocks[small_index]; if (small_blocks == 0) { return TRUE; } size_t big_spaces = ordered_spaces[big_index]; if (big_spaces == 0) { return FALSE; } dprintf (SEG_REUSE_LOG_1, ("[%d]Fitting %Id 2^%d plugs into %Id 2^%d free spaces", heap_number, small_blocks, (small_index + MIN_INDEX_POWER2), big_spaces, (big_index + MIN_INDEX_POWER2))); size_t big_to_small = big_spaces << (big_index - small_index); ptrdiff_t extra_small_spaces = big_to_small - small_blocks; dprintf (SEG_REUSE_LOG_1, ("[%d]%d 2^%d spaces can fit %d 2^%d blocks", heap_number, big_spaces, (big_index + MIN_INDEX_POWER2), big_to_small, (small_index + MIN_INDEX_POWER2))); BOOL can_fit = (extra_small_spaces >= 0); if (can_fit) { dprintf (SEG_REUSE_LOG_1, ("[%d]Can fit with %d 2^%d extras blocks", heap_number, extra_small_spaces, (small_index + MIN_INDEX_POWER2))); } int i = 0; dprintf (SEG_REUSE_LOG_1, ("[%d]Setting # of 2^%d spaces to 0", heap_number, (big_index + MIN_INDEX_POWER2))); ordered_spaces[big_index] = 0; if (extra_small_spaces > 0) { dprintf (SEG_REUSE_LOG_1, ("[%d]Setting # of 2^%d blocks to 0", heap_number, (small_index + MIN_INDEX_POWER2))); ordered_blocks[small_index] = 0; for (i = small_index; i < big_index; i++) { if (extra_small_spaces & 1) { dprintf (SEG_REUSE_LOG_1, ("[%d]Increasing # of 2^%d spaces from %d to %d", heap_number, (i + MIN_INDEX_POWER2), ordered_spaces[i], (ordered_spaces[i] + 1))); ordered_spaces[i] += 1; } extra_small_spaces >>= 1; } dprintf (SEG_REUSE_LOG_1, ("[%d]Finally increasing # of 2^%d spaces from %d to %d", heap_number, (i + MIN_INDEX_POWER2), ordered_spaces[i], (ordered_spaces[i] + extra_small_spaces))); ordered_spaces[i] += extra_small_spaces; } else { dprintf (SEG_REUSE_LOG_1, ("[%d]Decreasing # of 2^%d blocks from %d to %d", heap_number, (small_index + MIN_INDEX_POWER2), ordered_blocks[small_index], (ordered_blocks[small_index] - big_to_small))); ordered_blocks[small_index] -= big_to_small; } #ifdef SEG_REUSE_STATS size_t temp; dprintf (SEG_REUSE_LOG_1, ("[%d]Plugs became:", heap_number)); dump_buckets (ordered_blocks, MAX_NUM_BUCKETS, &temp); dprintf (SEG_REUSE_LOG_1, ("[%d]Free spaces became:", heap_number)); dump_buckets (ordered_spaces, MAX_NUM_BUCKETS, &temp); #endif //SEG_REUSE_STATS return can_fit; } // space_index gets updated to the biggest available space index. 
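// It starts at the caller's *space_index and walks it downwards until either the
// blocks of size 2^(block_index + MIN_INDEX_POWER2) fit (TRUE) or space_index drops
// below block_index, meaning no remaining space bucket is big enough (FALSE). Because
// space_index is passed by pointer, the narrowed value carries over as
// can_fit_all_blocks_p iterates the block buckets from largest to smallest.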
BOOL gc_heap::can_fit_blocks_p (size_t* ordered_blocks, int block_index, size_t* ordered_spaces, int* space_index) { assert (*space_index >= block_index); while (!can_fit_in_spaces_p (ordered_blocks, block_index, ordered_spaces, *space_index)) { (*space_index)--; if (*space_index < block_index) { return FALSE; } } return TRUE; } BOOL gc_heap::can_fit_all_blocks_p (size_t* ordered_blocks, size_t* ordered_spaces, int count) { #ifdef FEATURE_STRUCTALIGN // BARTOKTODO (4841): reenable when can_fit_in_spaces_p takes alignment requirements into account return FALSE; #endif // FEATURE_STRUCTALIGN int space_index = count - 1; for (int block_index = (count - 1); block_index >= 0; block_index--) { if (!can_fit_blocks_p (ordered_blocks, block_index, ordered_spaces, &space_index)) { return FALSE; } } return TRUE; } void gc_heap::build_ordered_free_spaces (heap_segment* seg) { assert (bestfit_seg); //bestfit_seg->add_buckets (MAX_NUM_BUCKETS - free_space_buckets + MIN_INDEX_POWER2, // ordered_free_space_indices + (MAX_NUM_BUCKETS - free_space_buckets), // free_space_buckets, // free_space_items); bestfit_seg->add_buckets (MIN_INDEX_POWER2, ordered_free_space_indices, MAX_NUM_BUCKETS, free_space_items); assert (settings.condemned_generation == max_generation); uint8_t* first_address = heap_segment_mem (seg); uint8_t* end_address = heap_segment_reserved (seg); //look through the pinned plugs for relevant ones. //Look for the right pinned plug to start from. reset_pinned_queue_bos(); mark* m = 0; // See comment in can_expand_into_p why we need this size. size_t eph_gen_starts = eph_gen_starts_size + Align (min_obj_size); BOOL has_fit_gen_starts = FALSE; while (!pinned_plug_que_empty_p()) { m = oldest_pin(); if ((pinned_plug (m) >= first_address) && (pinned_plug (m) < end_address) && (pinned_len (m) >= eph_gen_starts)) { assert ((pinned_plug (m) - pinned_len (m)) == bestfit_first_pin); break; } else { deque_pinned_plug(); } } if (!pinned_plug_que_empty_p()) { bestfit_seg->add ((void*)m, TRUE, TRUE); deque_pinned_plug(); m = oldest_pin(); has_fit_gen_starts = TRUE; } while (!pinned_plug_que_empty_p() && ((pinned_plug (m) >= first_address) && (pinned_plug (m) < end_address))) { bestfit_seg->add ((void*)m, TRUE, FALSE); deque_pinned_plug(); m = oldest_pin(); } if (commit_end_of_seg) { if (!has_fit_gen_starts) { assert (bestfit_first_pin == heap_segment_plan_allocated (seg)); } bestfit_seg->add ((void*)seg, FALSE, (!has_fit_gen_starts)); } #ifdef _DEBUG bestfit_seg->check(); #endif //_DEBUG } BOOL gc_heap::try_best_fit (BOOL end_of_segment_p) { if (!end_of_segment_p) { trim_free_spaces_indices (); } BOOL can_bestfit = can_fit_all_blocks_p (ordered_plug_indices, ordered_free_space_indices, MAX_NUM_BUCKETS); return can_bestfit; } BOOL gc_heap::best_fit (size_t free_space, size_t largest_free_space, size_t additional_space, BOOL* use_additional_space) { dprintf (SEG_REUSE_LOG_0, ("gen%d: trying best fit mechanism", settings.condemned_generation)); assert (!additional_space || (additional_space && use_additional_space)); if (use_additional_space) { *use_additional_space = FALSE; } if (ordered_plug_indices_init == FALSE) { total_ephemeral_plugs = 0; build_ordered_plug_indices(); ordered_plug_indices_init = TRUE; } else { memcpy (ordered_plug_indices, saved_ordered_plug_indices, sizeof(ordered_plug_indices)); } if (total_ephemeral_plugs == END_SPACE_AFTER_GC_FL) { dprintf (SEG_REUSE_LOG_0, ("No ephemeral plugs to realloc, done")); size_t empty_eph = (END_SPACE_AFTER_GC_FL + (Align (min_obj_size)) * (max_generation + 
1)); BOOL can_fit_empty_eph = (largest_free_space >= empty_eph); if (!can_fit_empty_eph) { can_fit_empty_eph = (additional_space >= empty_eph); if (can_fit_empty_eph) { *use_additional_space = TRUE; } } return can_fit_empty_eph; } if ((total_ephemeral_plugs + approximate_new_allocation()) >= (free_space + additional_space)) { dprintf (SEG_REUSE_LOG_0, ("We won't have enough free space left in this segment after fitting, done")); return FALSE; } if ((free_space + additional_space) == 0) { dprintf (SEG_REUSE_LOG_0, ("No free space in this segment, done")); return FALSE; } #ifdef SEG_REUSE_STATS dprintf (SEG_REUSE_LOG_0, ("Free spaces:")); size_t total_free_space_power2 = 0; size_t total_free_space_items = dump_buckets (ordered_free_space_indices, MAX_NUM_BUCKETS, &total_free_space_power2); dprintf (SEG_REUSE_LOG_0, ("currently max free spaces is %Id", max_free_space_items)); dprintf (SEG_REUSE_LOG_0, ("Ephemeral plugs: 0x%Ix, free space: 0x%Ix (rounded down to 0x%Ix (%Id%%)), additional free_space: 0x%Ix", total_ephemeral_plugs, free_space, total_free_space_power2, (free_space ? (total_free_space_power2 * 100 / free_space) : 0), additional_space)); size_t saved_all_free_space_indices[MAX_NUM_BUCKETS]; memcpy (saved_all_free_space_indices, ordered_free_space_indices, sizeof(saved_all_free_space_indices)); #endif // SEG_REUSE_STATS if (total_ephemeral_plugs > (free_space + additional_space)) { return FALSE; } use_bestfit = try_best_fit(FALSE); if (!use_bestfit && additional_space) { int relative_free_space_index = relative_index_power2_free_space (round_down_power2 (additional_space)); if (relative_free_space_index != -1) { int relative_plug_index = 0; size_t plugs_to_fit = 0; for (relative_plug_index = (MAX_NUM_BUCKETS - 1); relative_plug_index >= 0; relative_plug_index--) { plugs_to_fit = ordered_plug_indices[relative_plug_index]; if (plugs_to_fit != 0) { break; } } if ((relative_plug_index > relative_free_space_index) || ((relative_plug_index == relative_free_space_index) && (plugs_to_fit > 1))) { #ifdef SEG_REUSE_STATS dprintf (SEG_REUSE_LOG_0, ("additional space is 2^%d but we stopped at %d 2^%d plug(s)", (relative_free_space_index + MIN_INDEX_POWER2), plugs_to_fit, (relative_plug_index + MIN_INDEX_POWER2))); #endif // SEG_REUSE_STATS goto adjust; } dprintf (SEG_REUSE_LOG_0, ("Adding end of segment (2^%d)", (relative_free_space_index + MIN_INDEX_POWER2))); ordered_free_space_indices[relative_free_space_index]++; use_bestfit = try_best_fit(TRUE); if (use_bestfit) { free_space_items++; // Since we might've trimmed away some of the free spaces we had, we should see // if we really need to use end of seg space - if it's the same or smaller than // the largest space we trimmed we can just add that one back instead of // using end of seg. if (relative_free_space_index > trimmed_free_space_index) { *use_additional_space = TRUE; } else { // If the addition space is <= than the last trimmed space, we // should just use that last trimmed space instead. saved_ordered_free_space_indices[trimmed_free_space_index]++; } } } } adjust: if (!use_bestfit) { dprintf (SEG_REUSE_LOG_0, ("couldn't fit...")); #ifdef SEG_REUSE_STATS size_t saved_max = max_free_space_items; BOOL temp_bestfit = FALSE; dprintf (SEG_REUSE_LOG_0, ("----Starting experiment process----")); dprintf (SEG_REUSE_LOG_0, ("----Couldn't fit with max free items %Id", max_free_space_items)); // TODO: need to take the end of segment into consideration. 
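// The loop below is stats-only experimentation: it temporarily raises
// max_free_space_items in 1.5x steps, restores the untrimmed free-space buckets from
// saved_all_free_space_indices, and re-runs try_best_fit just to log whether keeping
// more free spaces around would have let the fit succeed. max_free_space_items is
// restored to saved_max afterwards, so this has no effect on the actual decision.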
while (max_free_space_items <= total_free_space_items) { max_free_space_items += max_free_space_items / 2; dprintf (SEG_REUSE_LOG_0, ("----Temporarily increasing max free spaces to %Id", max_free_space_items)); memcpy (ordered_free_space_indices, saved_all_free_space_indices, sizeof(ordered_free_space_indices)); if (try_best_fit(FALSE)) { temp_bestfit = TRUE; break; } } if (temp_bestfit) { dprintf (SEG_REUSE_LOG_0, ("----With %Id max free spaces we could fit", max_free_space_items)); } else { dprintf (SEG_REUSE_LOG_0, ("----Tried all free spaces and still couldn't fit, lost too much space")); } dprintf (SEG_REUSE_LOG_0, ("----Restoring max free spaces to %Id", saved_max)); max_free_space_items = saved_max; #endif // SEG_REUSE_STATS if (free_space_items) { max_free_space_items = min (MAX_NUM_FREE_SPACES, free_space_items * 2); max_free_space_items = max (max_free_space_items, MIN_NUM_FREE_SPACES); } else { max_free_space_items = MAX_NUM_FREE_SPACES; } } dprintf (SEG_REUSE_LOG_0, ("Adjusted number of max free spaces to %Id", max_free_space_items)); dprintf (SEG_REUSE_LOG_0, ("------End of best fitting process------\n")); return use_bestfit; } BOOL gc_heap::process_free_space (heap_segment* seg, size_t free_space, size_t min_free_size, size_t min_cont_size, size_t* total_free_space, size_t* largest_free_space) { *total_free_space += free_space; *largest_free_space = max (*largest_free_space, free_space); #ifdef SIMPLE_DPRINTF dprintf (SEG_REUSE_LOG_1, ("free space len: %Ix, total free space: %Ix, largest free space: %Ix", free_space, *total_free_space, *largest_free_space)); #endif //SIMPLE_DPRINTF if ((*total_free_space >= min_free_size) && (*largest_free_space >= min_cont_size)) { #ifdef SIMPLE_DPRINTF dprintf (SEG_REUSE_LOG_0, ("(gen%d)total free: %Ix(min: %Ix), largest free: %Ix(min: %Ix). Found segment %Ix to reuse without bestfit", settings.condemned_generation, *total_free_space, min_free_size, *largest_free_space, min_cont_size, (size_t)seg)); #else UNREFERENCED_PARAMETER(seg); #endif //SIMPLE_DPRINTF return TRUE; } int free_space_index = relative_index_power2_free_space (round_down_power2 (free_space)); if (free_space_index != -1) { ordered_free_space_indices[free_space_index]++; } return FALSE; } BOOL gc_heap::can_expand_into_p (heap_segment* seg, size_t min_free_size, size_t min_cont_size, allocator* gen_allocator) { min_cont_size += END_SPACE_AFTER_GC; use_bestfit = FALSE; commit_end_of_seg = FALSE; bestfit_first_pin = 0; uint8_t* first_address = heap_segment_mem (seg); uint8_t* end_address = heap_segment_reserved (seg); size_t end_extra_space = end_space_after_gc(); if ((heap_segment_reserved (seg) - end_extra_space) <= heap_segment_plan_allocated (seg)) { dprintf (SEG_REUSE_LOG_0, ("can_expand_into_p: can't use segment [%Ix %Ix, has less than %d bytes at the end", first_address, end_address, end_extra_space)); return FALSE; } end_address -= end_extra_space; dprintf (SEG_REUSE_LOG_0, ("can_expand_into_p(gen%d): min free: %Ix, min continuous: %Ix", settings.condemned_generation, min_free_size, min_cont_size)); size_t eph_gen_starts = eph_gen_starts_size; if (settings.condemned_generation == max_generation) { size_t free_space = 0; size_t largest_free_space = free_space; dprintf (SEG_REUSE_LOG_0, ("can_expand_into_p: gen2: testing segment [%Ix %Ix", first_address, end_address)); //Look through the pinned plugs for relevant ones and Look for the right pinned plug to start from. 
//We are going to allocate the generation starts in the 1st free space, //so start from the first free space that's big enough for gen starts and a min object size. // If we see a free space that is >= gen starts but < gen starts + min obj size we just don't use it - // we could use it by allocating the last generation start a bit bigger but // the complexity isn't worth the effort (those plugs are from gen2 // already anyway). reset_pinned_queue_bos(); mark* m = 0; BOOL has_fit_gen_starts = FALSE; init_ordered_free_space_indices (); while (!pinned_plug_que_empty_p()) { m = oldest_pin(); if ((pinned_plug (m) >= first_address) && (pinned_plug (m) < end_address) && (pinned_len (m) >= (eph_gen_starts + Align (min_obj_size)))) { break; } else { deque_pinned_plug(); } } if (!pinned_plug_que_empty_p()) { bestfit_first_pin = pinned_plug (m) - pinned_len (m); if (process_free_space (seg, pinned_len (m) - eph_gen_starts, min_free_size, min_cont_size, &free_space, &largest_free_space)) { return TRUE; } deque_pinned_plug(); m = oldest_pin(); has_fit_gen_starts = TRUE; } dprintf (3, ("first pin is %Ix", pinned_plug (m))); //tally up free space while (!pinned_plug_que_empty_p() && ((pinned_plug (m) >= first_address) && (pinned_plug (m) < end_address))) { dprintf (3, ("looking at pin %Ix", pinned_plug (m))); if (process_free_space (seg, pinned_len (m), min_free_size, min_cont_size, &free_space, &largest_free_space)) { return TRUE; } deque_pinned_plug(); m = oldest_pin(); } //try to find space at the end of the segment. size_t end_space = (end_address - heap_segment_plan_allocated (seg)); size_t additional_space = ((min_free_size > free_space) ? (min_free_size - free_space) : 0); dprintf (SEG_REUSE_LOG_0, ("end space: %Ix; additional: %Ix", end_space, additional_space)); if (end_space >= additional_space) { BOOL can_fit = TRUE; commit_end_of_seg = TRUE; if (largest_free_space < min_cont_size) { if (end_space >= min_cont_size) { additional_space = max (min_cont_size, additional_space); dprintf (SEG_REUSE_LOG_0, ("(gen2)Found segment %Ix to reuse without bestfit, with committing end of seg for eph", seg)); } else { if (settings.concurrent) { can_fit = FALSE; commit_end_of_seg = FALSE; } else { size_t additional_space_bestfit = additional_space; if (!has_fit_gen_starts) { if (additional_space_bestfit < (eph_gen_starts + Align (min_obj_size))) { dprintf (SEG_REUSE_LOG_0, ("(gen2)Couldn't fit, gen starts not allocated yet and end space is too small: %Id", additional_space_bestfit)); return FALSE; } bestfit_first_pin = heap_segment_plan_allocated (seg); additional_space_bestfit -= eph_gen_starts; } can_fit = best_fit (free_space, largest_free_space, additional_space_bestfit, &commit_end_of_seg); if (can_fit) { dprintf (SEG_REUSE_LOG_0, ("(gen2)Found segment %Ix to reuse with bestfit, %s committing end of seg", seg, (commit_end_of_seg ? "with" : "without"))); } else { dprintf (SEG_REUSE_LOG_0, ("(gen2)Couldn't fit, total free space is %Ix", (free_space + end_space))); } } } } else { dprintf (SEG_REUSE_LOG_0, ("(gen2)Found segment %Ix to reuse without bestfit, with committing end of seg", seg)); } assert (additional_space <= end_space); if (commit_end_of_seg) { if (!grow_heap_segment (seg, heap_segment_plan_allocated (seg) + additional_space)) { dprintf (2, ("Couldn't commit end of segment?!")); use_bestfit = FALSE; return FALSE; } if (use_bestfit) { // We increase the index here because growing heap segment could create a discrepency with // the additional space we used (could be bigger). 
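// (i.e. heap_segment_committed may now sit further out than
// heap_segment_plan_allocated + additional_space, so recompute the actual end-of-seg
// leftover and fold it into the saved bucket counts, which are copied back into
// ordered_free_space_indices just below when bestfit is used.)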
size_t free_space_end_of_seg = heap_segment_committed (seg) - heap_segment_plan_allocated (seg); int relative_free_space_index = relative_index_power2_free_space (round_down_power2 (free_space_end_of_seg)); saved_ordered_free_space_indices[relative_free_space_index]++; } } if (use_bestfit) { memcpy (ordered_free_space_indices, saved_ordered_free_space_indices, sizeof(ordered_free_space_indices)); max_free_space_items = max (MIN_NUM_FREE_SPACES, free_space_items * 3 / 2); max_free_space_items = min (MAX_NUM_FREE_SPACES, max_free_space_items); dprintf (SEG_REUSE_LOG_0, ("could fit! %Id free spaces, %Id max", free_space_items, max_free_space_items)); } return can_fit; } dprintf (SEG_REUSE_LOG_0, ("(gen2)Couldn't fit, total free space is %Ix", (free_space + end_space))); return FALSE; } else { assert (settings.condemned_generation == (max_generation-1)); size_t free_space = (end_address - heap_segment_plan_allocated (seg)); size_t largest_free_space = free_space; dprintf (SEG_REUSE_LOG_0, ("can_expand_into_p: gen1: testing segment [%Ix %Ix", first_address, end_address)); //find the first free list in range of the current segment uint8_t* free_list = 0; unsigned int a_l_idx = gen_allocator->first_suitable_bucket(eph_gen_starts); for (; a_l_idx < gen_allocator->number_of_buckets(); a_l_idx++) { free_list = gen_allocator->alloc_list_head_of (a_l_idx); while (free_list) { if ((free_list >= first_address) && (free_list < end_address) && (unused_array_size (free_list) >= eph_gen_starts)) { goto next; } else { free_list = free_list_slot (free_list); } } } next: if (free_list) { init_ordered_free_space_indices (); if (process_free_space (seg, unused_array_size (free_list) - eph_gen_starts + Align (min_obj_size), min_free_size, min_cont_size, &free_space, &largest_free_space)) { return TRUE; } free_list = free_list_slot (free_list); } else { dprintf (SEG_REUSE_LOG_0, ("(gen1)Couldn't fit, no free list")); return FALSE; } //tally up free space while (1) { while (free_list) { if ((free_list >= first_address) && (free_list < end_address) && process_free_space (seg, unused_array_size (free_list), min_free_size, min_cont_size, &free_space, &largest_free_space)) { return TRUE; } free_list = free_list_slot (free_list); } a_l_idx++; if (a_l_idx < gen_allocator->number_of_buckets()) { free_list = gen_allocator->alloc_list_head_of (a_l_idx); } else break; } dprintf (SEG_REUSE_LOG_0, ("(gen1)Couldn't fit, total free space is %Ix", free_space)); return FALSE; /* BOOL can_fit = best_fit (free_space, 0, NULL); if (can_fit) { dprintf (SEG_REUSE_LOG_0, ("(gen1)Found segment %Ix to reuse with bestfit", seg)); } else { dprintf (SEG_REUSE_LOG_0, ("(gen1)Couldn't fit, total free space is %Ix", free_space)); } return can_fit; */ } } void gc_heap::realloc_plug (size_t last_plug_size, uint8_t*& last_plug, generation* gen, uint8_t* start_address, unsigned int& active_new_gen_number, uint8_t*& last_pinned_gap, BOOL& leftp, BOOL shortened_p #ifdef SHORT_PLUGS , mark* pinned_plug_entry #endif //SHORT_PLUGS ) { // detect generation boundaries // make sure that active_new_gen_number is not the youngest generation. // because the generation_limit wouldn't return the right thing in this case. 
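// (When use_bestfit is set, the plan allocation starts for the ephemeral generations
// were already assigned from bestfit_first_pin in realloc_plugs, so the generation
// boundary check is only needed on the non-bestfit path below.)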
if (!use_bestfit) { if ((active_new_gen_number > 1) && (last_plug >= generation_limit (active_new_gen_number))) { assert (last_plug >= start_address); active_new_gen_number--; realloc_plan_generation_start (generation_of (active_new_gen_number), gen); assert (generation_plan_allocation_start (generation_of (active_new_gen_number))); leftp = FALSE; } } // detect pinned plugs if (!pinned_plug_que_empty_p() && (last_plug == pinned_plug (oldest_pin()))) { size_t entry = deque_pinned_plug(); mark* m = pinned_plug_of (entry); size_t saved_pinned_len = pinned_len(m); pinned_len(m) = last_plug - last_pinned_gap; //dprintf (3,("Adjusting pinned gap: [%Ix, %Ix[", (size_t)last_pinned_gap, (size_t)last_plug)); if (m->has_post_plug_info()) { last_plug_size += sizeof (gap_reloc_pair); dprintf (3, ("ra pinned %Ix was shortened, adjusting plug size to %Ix", last_plug, last_plug_size)) } last_pinned_gap = last_plug + last_plug_size; dprintf (3, ("ra found pin %Ix, len: %Ix->%Ix, last_p: %Ix, last_p_size: %Ix", pinned_plug (m), saved_pinned_len, pinned_len (m), last_plug, last_plug_size)); leftp = FALSE; //we are creating a generation fault. set the cards. { size_t end_card = card_of (align_on_card (last_plug + last_plug_size)); size_t card = card_of (last_plug); while (card != end_card) { set_card (card); card++; } } } else if (last_plug >= start_address) { #ifdef FEATURE_STRUCTALIGN int requiredAlignment; ptrdiff_t pad; node_aligninfo (last_plug, requiredAlignment, pad); // from how we previously aligned the plug's destination address, // compute the actual alignment offset. uint8_t* reloc_plug = last_plug + node_relocation_distance (last_plug); ptrdiff_t alignmentOffset = ComputeStructAlignPad(reloc_plug, requiredAlignment, 0); if (!alignmentOffset) { // allocate_in_expanded_heap doesn't expect alignmentOffset to be zero. alignmentOffset = requiredAlignment; } //clear the alignment info because we are reallocating clear_node_aligninfo (last_plug); #else // FEATURE_STRUCTALIGN //clear the realignment flag because we are reallocating clear_node_realigned (last_plug); #endif // FEATURE_STRUCTALIGN BOOL adjacentp = FALSE; BOOL set_padding_on_saved_p = FALSE; if (shortened_p) { last_plug_size += sizeof (gap_reloc_pair); #ifdef SHORT_PLUGS assert (pinned_plug_entry != NULL); if (last_plug_size <= sizeof (plug_and_gap)) { set_padding_on_saved_p = TRUE; } #endif //SHORT_PLUGS dprintf (3, ("ra plug %Ix was shortened, adjusting plug size to %Ix", last_plug, last_plug_size)) } #ifdef SHORT_PLUGS clear_padding_in_expand (last_plug, set_padding_on_saved_p, pinned_plug_entry); #endif //SHORT_PLUGS uint8_t* new_address = allocate_in_expanded_heap(gen, last_plug_size, adjacentp, last_plug, #ifdef SHORT_PLUGS set_padding_on_saved_p, pinned_plug_entry, #endif //SHORT_PLUGS TRUE, active_new_gen_number REQD_ALIGN_AND_OFFSET_ARG); dprintf (3, ("ra NA: [%Ix, %Ix[: %Ix", new_address, (new_address + last_plug_size), last_plug_size)); assert (new_address); set_node_relocation_distance (last_plug, new_address - last_plug); #ifdef FEATURE_STRUCTALIGN if (leftp && node_alignpad (last_plug) == 0) #else // FEATURE_STRUCTALIGN if (leftp && !node_realigned (last_plug)) #endif // FEATURE_STRUCTALIGN { // TODO - temporarily disable L optimization because of a bug in it. 
//set_node_left (last_plug); } dprintf (3,(" Re-allocating %Ix->%Ix len %Id", (size_t)last_plug, (size_t)new_address, last_plug_size)); leftp = adjacentp; } } void gc_heap::realloc_in_brick (uint8_t* tree, uint8_t*& last_plug, uint8_t* start_address, generation* gen, unsigned int& active_new_gen_number, uint8_t*& last_pinned_gap, BOOL& leftp) { assert (tree != NULL); int left_node = node_left_child (tree); int right_node = node_right_child (tree); dprintf (3, ("ra: tree: %Ix, last_pin_gap: %Ix, last_p: %Ix, L: %d, R: %d", tree, last_pinned_gap, last_plug, left_node, right_node)); if (left_node) { dprintf (3, ("LN: realloc %Ix(%Ix)", (tree + left_node), last_plug)); realloc_in_brick ((tree + left_node), last_plug, start_address, gen, active_new_gen_number, last_pinned_gap, leftp); } if (last_plug != 0) { uint8_t* plug = tree; BOOL has_pre_plug_info_p = FALSE; BOOL has_post_plug_info_p = FALSE; mark* pinned_plug_entry = get_next_pinned_entry (tree, &has_pre_plug_info_p, &has_post_plug_info_p, FALSE); // We only care about the pre plug info 'cause that's what decides if the last plug is shortened. // The pinned plugs are handled in realloc_plug. size_t gap_size = node_gap_size (plug); uint8_t* gap = (plug - gap_size); uint8_t* last_plug_end = gap; size_t last_plug_size = (last_plug_end - last_plug); // Cannot assert this - a plug could be less than that due to the shortened ones. //assert (last_plug_size >= Align (min_obj_size)); dprintf (3, ("ra: plug %Ix, gap size: %Ix, last_pin_gap: %Ix, last_p: %Ix, last_p_end: %Ix, shortened: %d", plug, gap_size, last_pinned_gap, last_plug, last_plug_end, (has_pre_plug_info_p ? 1 : 0))); realloc_plug (last_plug_size, last_plug, gen, start_address, active_new_gen_number, last_pinned_gap, leftp, has_pre_plug_info_p #ifdef SHORT_PLUGS , pinned_plug_entry #endif //SHORT_PLUGS ); } last_plug = tree; if (right_node) { dprintf (3, ("RN: realloc %Ix(%Ix)", (tree + right_node), last_plug)); realloc_in_brick ((tree + right_node), last_plug, start_address, gen, active_new_gen_number, last_pinned_gap, leftp); } } void gc_heap::realloc_plugs (generation* consing_gen, heap_segment* seg, uint8_t* start_address, uint8_t* end_address, unsigned active_new_gen_number) { dprintf (3, ("--- Reallocing ---")); if (use_bestfit) { //make sure that every generation has a planned allocation start int gen_number = max_generation - 1; while (gen_number >= 0) { generation* gen = generation_of (gen_number); if (0 == generation_plan_allocation_start (gen)) { generation_plan_allocation_start (gen) = bestfit_first_pin + (max_generation - gen_number - 1) * Align (min_obj_size); generation_plan_allocation_start_size (gen) = Align (min_obj_size); assert (generation_plan_allocation_start (gen)); } gen_number--; } } uint8_t* first_address = start_address; //Look for the right pinned plug to start from. 
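// "Right" here means the first queued pin that falls between the segment's planned
// allocated end (planned_ephemeral_seg_end) and end_address; pins before that point
// are dequeued, as they were presumably already placed while planning the reused
// segment.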
reset_pinned_queue_bos(); uint8_t* planned_ephemeral_seg_end = heap_segment_plan_allocated (seg); while (!pinned_plug_que_empty_p()) { mark* m = oldest_pin(); if ((pinned_plug (m) >= planned_ephemeral_seg_end) && (pinned_plug (m) < end_address)) { if (pinned_plug (m) < first_address) { first_address = pinned_plug (m); } break; } else deque_pinned_plug(); } size_t current_brick = brick_of (first_address); size_t end_brick = brick_of (end_address-1); uint8_t* last_plug = 0; uint8_t* last_pinned_gap = heap_segment_plan_allocated (seg); BOOL leftp = FALSE; dprintf (3, ("start addr: %Ix, first addr: %Ix, current oldest pin: %Ix", start_address, first_address, pinned_plug (oldest_pin()))); while (current_brick <= end_brick) { int brick_entry = brick_table [ current_brick ]; if (brick_entry >= 0) { realloc_in_brick ((brick_address (current_brick) + brick_entry - 1), last_plug, start_address, consing_gen, active_new_gen_number, last_pinned_gap, leftp); } current_brick++; } if (last_plug != 0) { realloc_plug (end_address - last_plug, last_plug, consing_gen, start_address, active_new_gen_number, last_pinned_gap, leftp, FALSE #ifdef SHORT_PLUGS , NULL #endif //SHORT_PLUGS ); } //Fix the old segment allocated size assert (last_pinned_gap >= heap_segment_mem (seg)); assert (last_pinned_gap <= heap_segment_committed (seg)); heap_segment_plan_allocated (seg) = last_pinned_gap; } void gc_heap::set_expand_in_full_gc (int condemned_gen_number) { if (!should_expand_in_full_gc) { if ((condemned_gen_number != max_generation) && (settings.pause_mode != pause_low_latency) && (settings.pause_mode != pause_sustained_low_latency)) { should_expand_in_full_gc = TRUE; } } } void gc_heap::save_ephemeral_generation_starts() { for (int ephemeral_generation = 0; ephemeral_generation < max_generation; ephemeral_generation++) { saved_ephemeral_plan_start[ephemeral_generation] = generation_plan_allocation_start (generation_of (ephemeral_generation)); saved_ephemeral_plan_start_size[ephemeral_generation] = generation_plan_allocation_start_size (generation_of (ephemeral_generation)); } } generation* gc_heap::expand_heap (int condemned_generation, generation* consing_gen, heap_segment* new_heap_segment) { #ifndef _DEBUG UNREFERENCED_PARAMETER(condemned_generation); #endif //!_DEBUG assert (condemned_generation >= (max_generation -1)); unsigned int active_new_gen_number = max_generation; //Set one too high to get generation gap uint8_t* start_address = generation_limit (max_generation); uint8_t* end_address = heap_segment_allocated (ephemeral_heap_segment); BOOL should_promote_ephemeral = FALSE; ptrdiff_t eph_size = total_ephemeral_size; #ifdef BACKGROUND_GC dprintf(2,("%s: ---- Heap Expansion ----", (gc_heap::background_running_p() ? "FGC" : "NGC"))); #endif //BACKGROUND_GC settings.heap_expansion = TRUE; //reset the elevation state for next time. 
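// (should_lock_elevation is only preserved when this expansion reused an existing
// segment - expand_reused_seg_p() checks whether the recorded gc_heap_expand
// mechanism was expand_reuse_bestfit or expand_reuse_normal.)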
dprintf (2, ("Elevation: elevation = el_none")); if (settings.should_lock_elevation && !expand_reused_seg_p()) settings.should_lock_elevation = FALSE; heap_segment* new_seg = new_heap_segment; if (!new_seg) return consing_gen; //copy the card and brick tables if (g_gc_card_table!= card_table) copy_brick_card_table(); BOOL new_segment_p = (heap_segment_next (new_seg) == 0); dprintf (2, ("new_segment_p %Ix", (size_t)new_segment_p)); assert (generation_plan_allocation_start (generation_of (max_generation-1))); assert (generation_plan_allocation_start (generation_of (max_generation-1)) >= heap_segment_mem (ephemeral_heap_segment)); assert (generation_plan_allocation_start (generation_of (max_generation-1)) <= heap_segment_committed (ephemeral_heap_segment)); assert (generation_plan_allocation_start (youngest_generation)); assert (generation_plan_allocation_start (youngest_generation) < heap_segment_plan_allocated (ephemeral_heap_segment)); if (settings.pause_mode == pause_no_gc) { // We don't reuse for no gc, so the size used on the new eph seg is eph_size. if ((size_t)(heap_segment_reserved (new_seg) - heap_segment_mem (new_seg)) < (eph_size + soh_allocation_no_gc)) should_promote_ephemeral = TRUE; } else { if (!use_bestfit) { should_promote_ephemeral = dt_low_ephemeral_space_p (tuning_deciding_promote_ephemeral); } } if (should_promote_ephemeral) { ephemeral_promotion = TRUE; get_gc_data_per_heap()->set_mechanism (gc_heap_expand, expand_new_seg_ep); dprintf (2, ("promoting ephemeral")); save_ephemeral_generation_starts(); // We also need to adjust free_obj_space (due to padding) here because now young gens' free_obj_space will // belong to gen2. generation* max_gen = generation_of (max_generation); for (int i = 1; i < max_generation; i++) { generation_free_obj_space (max_gen) += generation_free_obj_space (generation_of (i)); dprintf (2, ("[h%d] maxgen freeobj + %Id=%Id", heap_number, generation_free_obj_space (generation_of (i)), generation_free_obj_space (max_gen))); } // TODO: This is actually insufficient - if BACKGROUND_GC is not defined we'd need to commit more // in order to accommodate eph gen starts. Also in the no_gc we should make sure used // is updated correctly. heap_segment_used (new_seg) = heap_segment_committed (new_seg); } else { // commit the new ephemeral segment all at once if it is a new one. if ((eph_size > 0) && new_segment_p) { #ifdef FEATURE_STRUCTALIGN // The destination may require a larger alignment padding than the source. // Assume the worst possible alignment padding. eph_size += ComputeStructAlignPad(heap_segment_mem (new_seg), MAX_STRUCTALIGN, OBJECT_ALIGNMENT_OFFSET); #endif // FEATURE_STRUCTALIGN #ifdef RESPECT_LARGE_ALIGNMENT //Since the generation start can be larger than min_obj_size //The alignment could be switched. 
eph_size += switch_alignment_size(FALSE); #endif //RESPECT_LARGE_ALIGNMENT //Since the generation start can be larger than min_obj_size //Compare the alignment of the first object in gen1 if (grow_heap_segment (new_seg, heap_segment_mem (new_seg) + eph_size) == 0) { fgm_result.set_fgm (fgm_commit_eph_segment, eph_size, FALSE); return consing_gen; } heap_segment_used (new_seg) = heap_segment_committed (new_seg); } //Fix the end of the old ephemeral heap segment heap_segment_plan_allocated (ephemeral_heap_segment) = generation_plan_allocation_start (generation_of (max_generation-1)); dprintf (3, ("Old ephemeral allocated set to %Ix", (size_t)heap_segment_plan_allocated (ephemeral_heap_segment))); } if (new_segment_p) { // TODO - Is this really necessary? We should think about it. //initialize the first brick size_t first_brick = brick_of (heap_segment_mem (new_seg)); set_brick (first_brick, heap_segment_mem (new_seg) - brick_address (first_brick)); } //From this point on, we cannot run out of memory //reset the allocation of the consing generation back to the end of the //old ephemeral segment generation_allocation_limit (consing_gen) = heap_segment_plan_allocated (ephemeral_heap_segment); generation_allocation_pointer (consing_gen) = generation_allocation_limit (consing_gen); generation_allocation_segment (consing_gen) = ephemeral_heap_segment; //clear the generation gap for all of the ephemeral generations { int generation_num = max_generation-1; while (generation_num >= 0) { generation* gen = generation_of (generation_num); generation_plan_allocation_start (gen) = 0; generation_num--; } } heap_segment* old_seg = ephemeral_heap_segment; ephemeral_heap_segment = new_seg; //Note: the ephemeral segment shouldn't be threaded onto the segment chain //because the relocation and compact phases shouldn't see it // set the generation members used by allocate_in_expanded_heap // and switch to ephemeral generation consing_gen = ensure_ephemeral_heap_segment (consing_gen); if (!should_promote_ephemeral) { realloc_plugs (consing_gen, old_seg, start_address, end_address, active_new_gen_number); } if (!use_bestfit) { repair_allocation_in_expanded_heap (consing_gen); } // assert that the generation gap for all of the ephemeral generations were allocated. #ifdef _DEBUG { int generation_num = max_generation-1; while (generation_num >= 0) { generation* gen = generation_of (generation_num); assert (generation_plan_allocation_start (gen)); generation_num--; } } #endif // _DEBUG if (!new_segment_p) { dprintf (2, ("Demoting ephemeral segment")); //demote the entire segment. settings.demotion = TRUE; get_gc_data_per_heap()->set_mechanism_bit (gc_demotion_bit); demotion_low = heap_segment_mem (ephemeral_heap_segment); demotion_high = heap_segment_reserved (ephemeral_heap_segment); } else { demotion_low = MAX_PTR; demotion_high = 0; #ifndef MULTIPLE_HEAPS settings.demotion = FALSE; get_gc_data_per_heap()->clear_mechanism_bit (gc_demotion_bit); #endif //!MULTIPLE_HEAPS } if (!should_promote_ephemeral && new_segment_p) { assert ((ptrdiff_t)total_ephemeral_size <= eph_size); } if (heap_segment_mem (old_seg) == heap_segment_plan_allocated (old_seg)) { // This is to catch when we accidently delete a segment that has pins. 
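// (verify_no_pins is a VERIFY_HEAP-only check: under heap verification it walks the
// mark stack and hits FATAL_GC_ERROR if any queued pinned plug still points into the
// given range.)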
verify_no_pins (heap_segment_mem (old_seg), heap_segment_reserved (old_seg)); } verify_no_pins (heap_segment_plan_allocated (old_seg), heap_segment_reserved(old_seg)); dprintf(2,("---- End of Heap Expansion ----")); return consing_gen; } #endif //!USE_REGIONS BOOL gc_heap::expand_reused_seg_p() { #ifdef USE_REGIONS return FALSE; #else BOOL reused_seg = FALSE; int heap_expand_mechanism = gc_data_per_heap.get_mechanism (gc_heap_expand); if ((heap_expand_mechanism == expand_reuse_bestfit) || (heap_expand_mechanism == expand_reuse_normal)) { reused_seg = TRUE; } return reused_seg; #endif //USE_REGIONS } void gc_heap::verify_no_pins (uint8_t* start, uint8_t* end) { #ifdef VERIFY_HEAP if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) { BOOL contains_pinned_plugs = FALSE; size_t mi = 0; mark* m = 0; while (mi != mark_stack_tos) { m = pinned_plug_of (mi); if ((pinned_plug (m) >= start) && (pinned_plug (m) < end)) { contains_pinned_plugs = TRUE; break; } else mi++; } if (contains_pinned_plugs) { FATAL_GC_ERROR(); } } #endif //VERIFY_HEAP } void gc_heap::set_static_data() { static_data* pause_mode_sdata = static_data_table[latency_level]; for (int i = 0; i < total_generation_count; i++) { dynamic_data* dd = dynamic_data_of (i); static_data* sdata = &pause_mode_sdata[i]; dd->sdata = sdata; dd->min_size = sdata->min_size; dprintf (GTC_LOG, ("PM: %d, gen%d: min: %Id, max: %Id, fr_l: %Id, fr_b: %d%%", settings.pause_mode,i, dd->min_size, dd_max_size (dd), sdata->fragmentation_limit, (int)(sdata->fragmentation_burden_limit * 100))); } } // Initialize the values that are not const. void gc_heap::init_static_data() { size_t gen0_min_size = get_gen0_min_size(); size_t gen0_max_size = #ifdef MULTIPLE_HEAPS max (6*1024*1024, min ( Align(soh_segment_size/2), 200*1024*1024)); #else //MULTIPLE_HEAPS ( #ifdef BACKGROUND_GC gc_can_use_concurrent ? 6*1024*1024 : #endif //BACKGROUND_GC max (6*1024*1024, min ( Align(soh_segment_size/2), 200*1024*1024)) ); #endif //MULTIPLE_HEAPS gen0_max_size = max (gen0_min_size, gen0_max_size); if (heap_hard_limit) { size_t gen0_max_size_seg = soh_segment_size / 4; dprintf (GTC_LOG, ("limit gen0 max %Id->%Id", gen0_max_size, gen0_max_size_seg)); gen0_max_size = min (gen0_max_size, gen0_max_size_seg); } size_t gen0_max_size_config = (size_t)GCConfig::GetGCGen0MaxBudget(); if (gen0_max_size_config) { gen0_max_size = min (gen0_max_size, gen0_max_size_config); #ifdef FEATURE_EVENT_TRACE gen0_max_budget_from_config = gen0_max_size; #endif //FEATURE_EVENT_TRACE } gen0_max_size = Align (gen0_max_size); gen0_min_size = min (gen0_min_size, gen0_max_size); // TODO: gen0_max_size has a 200mb cap; gen1_max_size should also have a cap. size_t gen1_max_size = (size_t) #ifdef MULTIPLE_HEAPS max (6*1024*1024, Align(soh_segment_size/2)); #else //MULTIPLE_HEAPS ( #ifdef BACKGROUND_GC gc_can_use_concurrent ? 
6*1024*1024 : #endif //BACKGROUND_GC max (6*1024*1024, Align(soh_segment_size/2)) ); #endif //MULTIPLE_HEAPS size_t gen1_max_size_config = (size_t)GCConfig::GetGCGen1MaxBudget(); if (gen1_max_size_config) { gen1_max_size = min (gen1_max_size, gen1_max_size_config); } gen1_max_size = Align (gen1_max_size); dprintf (GTC_LOG, ("gen0 min: %Id, max: %Id, gen1 max: %Id", gen0_min_size, gen0_max_size, gen1_max_size)); for (int i = latency_level_first; i <= latency_level_last; i++) { static_data_table[i][0].min_size = gen0_min_size; static_data_table[i][0].max_size = gen0_max_size; static_data_table[i][1].max_size = gen1_max_size; } } bool gc_heap::init_dynamic_data() { uint64_t now_raw_ts = RawGetHighPrecisionTimeStamp (); #ifdef HEAP_BALANCE_INSTRUMENTATION start_raw_ts = now_raw_ts; #endif //HEAP_BALANCE_INSTRUMENTATION uint64_t now = (uint64_t)((double)now_raw_ts * qpf_us); set_static_data(); if (heap_number == 0) { process_start_time = now; smoothed_desired_per_heap[0] = dynamic_data_of (0)->min_size; #ifdef HEAP_BALANCE_INSTRUMENTATION last_gc_end_time_us = now; dprintf (HEAP_BALANCE_LOG, ("qpf=%I64d, start: %I64d(%d)", qpf, start_raw_ts, now)); #endif //HEAP_BALANCE_INSTRUMENTATION } for (int i = 0; i < total_generation_count; i++) { dynamic_data* dd = dynamic_data_of (i); dd->gc_clock = 0; dd->time_clock = now; dd->previous_time_clock = now; dd->current_size = 0; dd->promoted_size = 0; dd->collection_count = 0; dd->new_allocation = dd->min_size; dd->gc_new_allocation = dd->new_allocation; dd->desired_allocation = dd->new_allocation; dd->fragmentation = 0; } return true; } float gc_heap::surv_to_growth (float cst, float limit, float max_limit) { if (cst < ((max_limit - limit ) / (limit * (max_limit-1.0f)))) return ((limit - limit*cst) / (1.0f - (cst * limit))); else return max_limit; } //if the allocation budget wasn't exhausted, the new budget may be wrong because the survival may //not be correct (collection happened too soon). Correct with a linear estimation based on the previous //value of the budget static size_t linear_allocation_model (float allocation_fraction, size_t new_allocation, size_t previous_desired_allocation, float time_since_previous_collection_secs) { if ((allocation_fraction < 0.95) && (allocation_fraction > 0.0)) { const float decay_time = 5*60.0f; // previous desired allocation expires over 5 minutes float decay_factor = (decay_time <= time_since_previous_collection_secs) ? 
0 : ((decay_time - time_since_previous_collection_secs) / decay_time); float previous_allocation_factor = (1.0f - allocation_fraction) * decay_factor; dprintf (2, ("allocation fraction: %d, decay factor: %d, previous allocation factor: %d", (int)(allocation_fraction*100.0), (int)(decay_factor*100.0), (int)(previous_allocation_factor*100.0))); new_allocation = (size_t)((1.0 - previous_allocation_factor)*new_allocation + previous_allocation_factor * previous_desired_allocation); } return new_allocation; } size_t gc_heap::desired_new_allocation (dynamic_data* dd, size_t out, int gen_number, int pass) { gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap(); if (dd_begin_data_size (dd) == 0) { size_t new_allocation = dd_min_size (dd); current_gc_data_per_heap->gen_data[gen_number].new_allocation = new_allocation; return new_allocation; } else { float cst; size_t previous_desired_allocation = dd_desired_allocation (dd); size_t current_size = dd_current_size (dd); float max_limit = dd_max_limit (dd); float limit = dd_limit (dd); size_t min_gc_size = dd_min_size (dd); float f = 0; size_t max_size = dd_max_size (dd); size_t new_allocation = 0; float time_since_previous_collection_secs = (dd_time_clock (dd) - dd_previous_time_clock (dd))*1e-6f; float allocation_fraction = (float) (dd_desired_allocation (dd) - dd_gc_new_allocation (dd)) / (float) (dd_desired_allocation (dd)); if (gen_number >= max_generation) { size_t new_size = 0; cst = min (1.0f, float (out) / float (dd_begin_data_size (dd))); f = surv_to_growth (cst, limit, max_limit); if (conserve_mem_setting != 0) { // if this is set, compute a growth factor based on it. // example: a setting of 6 means we have a goal of 60% live data // this means we allow 40% fragmentation // to keep heap size stable, we only use half of that (20%) for new allocation // f is (live data + new allocation)/(live data), so would be (60% + 20%) / 60% or 1.33 float f_conserve = ((10.0f / conserve_mem_setting) - 1) * 0.5f + 1.0f; // use the smaller one f = min (f, f_conserve); } size_t max_growth_size = (size_t)(max_size / f); if (current_size >= max_growth_size) { new_size = max_size; } else { new_size = (size_t) min (max ( (f * current_size), min_gc_size), max_size); } assert ((new_size >= current_size) || (new_size == max_size)); if (gen_number == max_generation) { new_allocation = max((new_size - current_size), min_gc_size); new_allocation = linear_allocation_model (allocation_fraction, new_allocation, dd_desired_allocation (dd), time_since_previous_collection_secs); if ( #ifdef BGC_SERVO_TUNING !bgc_tuning::fl_tuning_triggered && #endif //BGC_SERVO_TUNING (conserve_mem_setting == 0) && (dd_fragmentation (dd) > ((size_t)((f-1)*current_size)))) { //reducing allocation in case of fragmentation size_t new_allocation1 = max (min_gc_size, // CAN OVERFLOW (size_t)((float)new_allocation * current_size / ((float)current_size + 2*dd_fragmentation (dd)))); dprintf (2, ("Reducing max_gen allocation due to fragmentation from %Id to %Id", new_allocation, new_allocation1)); new_allocation = new_allocation1; } } else // not a SOH generation { uint32_t memory_load = 0; uint64_t available_physical = 0; get_memory_info (&memory_load, &available_physical); #ifdef TRACE_GC if (heap_hard_limit) { size_t allocated = 0; size_t committed = uoh_committed_size (gen_number, &allocated); dprintf (1, ("GC#%Id h%d, GMI: UOH budget, UOH commit %Id (obj %Id, frag %Id), total commit: %Id (recorded: %Id)", (size_t)settings.gc_index, heap_number, committed, allocated, 
dd_fragmentation (dynamic_data_of (gen_number)), get_total_committed_size(), (current_total_committed - current_total_committed_bookkeeping))); } #endif //TRACE_GC if (heap_number == 0) settings.exit_memory_load = memory_load; if (available_physical > 1024*1024) available_physical -= 1024*1024; uint64_t available_free = available_physical + (uint64_t)generation_free_list_space (generation_of (gen_number)); if (available_free > (uint64_t)MAX_PTR) { available_free = (uint64_t)MAX_PTR; } //try to avoid OOM during large object allocation new_allocation = max (min(max((new_size - current_size), dd_desired_allocation (dynamic_data_of (max_generation))), (size_t)available_free), max ((current_size/4), min_gc_size)); new_allocation = linear_allocation_model (allocation_fraction, new_allocation, dd_desired_allocation (dd), time_since_previous_collection_secs); } } else { size_t survivors = out; cst = float (survivors) / float (dd_begin_data_size (dd)); f = surv_to_growth (cst, limit, max_limit); new_allocation = (size_t) min (max ((f * (survivors)), min_gc_size), max_size); new_allocation = linear_allocation_model (allocation_fraction, new_allocation, dd_desired_allocation (dd), time_since_previous_collection_secs); if (gen_number == 0) { if (pass == 0) { size_t free_space = generation_free_list_space (generation_of (gen_number)); // DTREVIEW - is min_gc_size really a good choice? // on 64-bit this will almost always be true. dprintf (GTC_LOG, ("frag: %Id, min: %Id", free_space, min_gc_size)); if (free_space > min_gc_size) { settings.gen0_reduction_count = 2; } else { if (settings.gen0_reduction_count > 0) settings.gen0_reduction_count--; } } if (settings.gen0_reduction_count > 0) { dprintf (2, ("Reducing new allocation based on fragmentation")); new_allocation = min (new_allocation, max (min_gc_size, (max_size/3))); } } } size_t new_allocation_ret = Align (new_allocation, get_alignment_constant (gen_number <= max_generation)); int gen_data_index = gen_number; gc_generation_data* gen_data = &(current_gc_data_per_heap->gen_data[gen_data_index]); gen_data->new_allocation = new_allocation_ret; dd_surv (dd) = cst; dprintf (1, (ThreadStressLog::gcDesiredNewAllocationMsg(), heap_number, gen_number, out, current_size, (dd_desired_allocation (dd) - dd_gc_new_allocation (dd)), (int)(cst*100), (int)(f*100), current_size + new_allocation, new_allocation)); return new_allocation_ret; } } // REGIONS TODO: this can be merged with generation_size. 
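// generation_plan_size and generation_size below are structurally identical; the
// former works off the plan_allocated / plan_allocation_start fields (the post-plan
// view) while the latter uses the allocated / allocation_start fields (the current
// view), which is why the REGIONS TODO above suggests merging them.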
//returns the planned size of a generation (including free list element) size_t gc_heap::generation_plan_size (int gen_number) { #ifdef USE_REGIONS size_t result = 0; heap_segment* seg = heap_segment_rw (generation_start_segment (generation_of (gen_number))); while (seg) { uint8_t* end = heap_segment_plan_allocated (seg); result += end - heap_segment_mem (seg); dprintf (REGIONS_LOG, ("h%d size + %Id (%Ix - %Ix) -> %Id", heap_number, (end - heap_segment_mem (seg)), heap_segment_mem (seg), end, result)); seg = heap_segment_next (seg); } return result; #else //USE_REGIONS if (0 == gen_number) return max((heap_segment_plan_allocated (ephemeral_heap_segment) - generation_plan_allocation_start (generation_of (gen_number))), (int)Align (min_obj_size)); else { generation* gen = generation_of (gen_number); if (heap_segment_rw (generation_start_segment (gen)) == ephemeral_heap_segment) return (generation_plan_allocation_start (generation_of (gen_number - 1)) - generation_plan_allocation_start (generation_of (gen_number))); else { size_t gensize = 0; heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); PREFIX_ASSUME(seg != NULL); while (seg && (seg != ephemeral_heap_segment)) { gensize += heap_segment_plan_allocated (seg) - heap_segment_mem (seg); seg = heap_segment_next_rw (seg); } if (seg) { gensize += (generation_plan_allocation_start (generation_of (gen_number - 1)) - heap_segment_mem (ephemeral_heap_segment)); } return gensize; } } #endif //USE_REGIONS } //returns the size of a generation (including free list element) size_t gc_heap::generation_size (int gen_number) { #ifdef USE_REGIONS size_t result = 0; heap_segment* seg = heap_segment_rw (generation_start_segment (generation_of (gen_number))); while (seg) { uint8_t* end = heap_segment_allocated (seg); result += end - heap_segment_mem (seg); dprintf (2, ("h%d size + %Id (%Ix - %Ix) -> %Id", heap_number, (end - heap_segment_mem (seg)), heap_segment_mem (seg), end, result)); seg = heap_segment_next (seg); } return result; #else //USE_REGIONS if (0 == gen_number) return max((heap_segment_allocated (ephemeral_heap_segment) - generation_allocation_start (generation_of (gen_number))), (int)Align (min_obj_size)); else { generation* gen = generation_of (gen_number); if (heap_segment_rw (generation_start_segment (gen)) == ephemeral_heap_segment) return (generation_allocation_start (generation_of (gen_number - 1)) - generation_allocation_start (generation_of (gen_number))); else { size_t gensize = 0; heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); PREFIX_ASSUME(seg != NULL); while (seg && (seg != ephemeral_heap_segment)) { gensize += heap_segment_allocated (seg) - heap_segment_mem (seg); seg = heap_segment_next_rw (seg); } if (seg) { gensize += (generation_allocation_start (generation_of (gen_number - 1)) - heap_segment_mem (ephemeral_heap_segment)); } return gensize; } } #endif //USE_REGIONS } size_t gc_heap::compute_in (int gen_number) { assert (gen_number != 0); dynamic_data* dd = dynamic_data_of (gen_number); size_t in = generation_allocation_size (generation_of (gen_number)); if (gen_number == max_generation && ephemeral_promotion) { in = 0; for (int i = 0; i <= max_generation; i++) { dynamic_data* dd = dynamic_data_of (i); in += dd_survived_size (dd); if (i != max_generation) { generation_condemned_allocated (generation_of (gen_number)) += dd_survived_size (dd); } } } dd_gc_new_allocation (dd) -= in; dd_new_allocation (dd) = dd_gc_new_allocation (dd); gc_history_per_heap* current_gc_data_per_heap = 
get_gc_data_per_heap(); gc_generation_data* gen_data = &(current_gc_data_per_heap->gen_data[gen_number]); gen_data->in = in; generation_allocation_size (generation_of (gen_number)) = 0; return in; } void gc_heap::compute_promoted_allocation (int gen_number) { compute_in (gen_number); } #ifdef HOST_64BIT inline size_t gc_heap::trim_youngest_desired (uint32_t memory_load, size_t total_new_allocation, size_t total_min_allocation) { if (memory_load < MAX_ALLOWED_MEM_LOAD) { // If the total of memory load and gen0 budget exceeds // our max memory load limit, trim the gen0 budget so the total // is the max memory load limit. size_t remain_memory_load = (MAX_ALLOWED_MEM_LOAD - memory_load) * mem_one_percent; return min (total_new_allocation, remain_memory_load); } else { size_t total_max_allocation = max (mem_one_percent, total_min_allocation); return min (total_new_allocation, total_max_allocation); } } size_t gc_heap::joined_youngest_desired (size_t new_allocation) { dprintf (2, ("Entry memory load: %d; gen0 new_alloc: %Id", settings.entry_memory_load, new_allocation)); size_t final_new_allocation = new_allocation; if (new_allocation > MIN_YOUNGEST_GEN_DESIRED) { uint32_t num_heaps = 1; #ifdef MULTIPLE_HEAPS num_heaps = gc_heap::n_heaps; #endif //MULTIPLE_HEAPS size_t total_new_allocation = new_allocation * num_heaps; size_t total_min_allocation = MIN_YOUNGEST_GEN_DESIRED * num_heaps; if ((settings.entry_memory_load >= MAX_ALLOWED_MEM_LOAD) || (total_new_allocation > max (youngest_gen_desired_th, total_min_allocation))) { uint32_t memory_load = 0; get_memory_info (&memory_load); settings.exit_memory_load = memory_load; dprintf (2, ("Current memory load: %d", memory_load)); size_t final_total = trim_youngest_desired (memory_load, total_new_allocation, total_min_allocation); size_t max_new_allocation = #ifdef MULTIPLE_HEAPS dd_max_size (g_heaps[0]->dynamic_data_of (0)); #else //MULTIPLE_HEAPS dd_max_size (dynamic_data_of (0)); #endif //MULTIPLE_HEAPS final_new_allocation = min (Align ((final_total / num_heaps), get_alignment_constant (TRUE)), max_new_allocation); } } if (final_new_allocation < new_allocation) { settings.gen0_reduction_count = 2; } return final_new_allocation; } #endif // HOST_64BIT inline gc_history_global* gc_heap::get_gc_data_global() { #ifdef BACKGROUND_GC return (settings.concurrent ? &bgc_data_global : &gc_data_global); #else return &gc_data_global; #endif //BACKGROUND_GC } inline gc_history_per_heap* gc_heap::get_gc_data_per_heap() { #ifdef BACKGROUND_GC return (settings.concurrent ? &bgc_data_per_heap : &gc_data_per_heap); #else return &gc_data_per_heap; #endif //BACKGROUND_GC } void gc_heap::compute_new_dynamic_data (int gen_number) { PREFIX_ASSUME(gen_number >= 0); PREFIX_ASSUME(gen_number <= max_generation); dynamic_data* dd = dynamic_data_of (gen_number); generation* gen = generation_of (gen_number); size_t in = (gen_number==0) ? 
        0 : compute_in (gen_number);
    size_t total_gen_size = generation_size (gen_number);
    //keep track of fragmentation
    dd_fragmentation (dd) = generation_free_list_space (gen) + generation_free_obj_space (gen);
    dd_current_size (dd) = total_gen_size - dd_fragmentation (dd);

    gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap();
    size_t out = dd_survived_size (dd);

    gc_generation_data* gen_data = &(current_gc_data_per_heap->gen_data[gen_number]);
    gen_data->size_after = total_gen_size;
    gen_data->free_list_space_after = generation_free_list_space (gen);
    gen_data->free_obj_space_after = generation_free_obj_space (gen);

    if ((settings.pause_mode == pause_low_latency) && (gen_number <= 1))
    {
        // When we are in the low latency mode, we can still be
        // condemning more than gen1's 'cause of induced GCs.
        dd_desired_allocation (dd) = low_latency_alloc;
        dd_gc_new_allocation (dd) = dd_desired_allocation (dd);
        dd_new_allocation (dd) = dd_gc_new_allocation (dd);
    }
    else
    {
        if (gen_number == 0)
        {
            //compensate for dead finalizable objects promotion.
            //they shouldn't be counted for growth.
            size_t final_promoted = 0;
            final_promoted = min (finalization_promoted_bytes, out);
            // Prefast: this is clear from above but prefast needs to be told explicitly
            PREFIX_ASSUME(final_promoted <= out);
            dprintf (2, ("gen: %d final promoted: %Id", gen_number, final_promoted));
            dd_freach_previous_promotion (dd) = final_promoted;
            size_t lower_bound = desired_new_allocation (dd, out-final_promoted, gen_number, 0);

            if (settings.condemned_generation == 0)
            {
                //there is no noise.
                dd_desired_allocation (dd) = lower_bound;
            }
            else
            {
                size_t higher_bound = desired_new_allocation (dd, out, gen_number, 1);

                // <TODO>This assert was causing AppDomains\unload\test1n\test1nrun.bat to fail</TODO>
                //assert ( lower_bound <= higher_bound);

                //discount the noise. Change the desired allocation
                //only if the previous value is outside of the range.
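                //e.g. (illustrative numbers) if the previous desired allocation was 30MB and
                //the new [lower_bound, higher_bound] range is [24MB, 40MB], we keep 30MB;
                //a previous value of 20MB is raised to 24MB and one of 50MB is trimmed to 40MB.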
if (dd_desired_allocation (dd) < lower_bound) { dd_desired_allocation (dd) = lower_bound; } else if (dd_desired_allocation (dd) > higher_bound) { dd_desired_allocation (dd) = higher_bound; } #if defined (HOST_64BIT) && !defined (MULTIPLE_HEAPS) dd_desired_allocation (dd) = joined_youngest_desired (dd_desired_allocation (dd)); #endif // HOST_64BIT && !MULTIPLE_HEAPS trim_youngest_desired_low_memory(); dprintf (2, ("final gen0 new_alloc: %Id", dd_desired_allocation (dd))); } } else { dd_desired_allocation (dd) = desired_new_allocation (dd, out, gen_number, 0); } dd_gc_new_allocation (dd) = dd_desired_allocation (dd); // we may have had some incoming objects during this GC - // adjust the consumed budget for these dd_new_allocation (dd) = dd_gc_new_allocation (dd) - in; } gen_data->pinned_surv = dd_pinned_survived_size (dd); gen_data->npinned_surv = dd_survived_size (dd) - dd_pinned_survived_size (dd); dd_promoted_size (dd) = out; if (gen_number == max_generation) { for (int i = (gen_number + 1); i < total_generation_count; i++) { dd = dynamic_data_of (i); total_gen_size = generation_size (i); generation* gen = generation_of (i); dd_fragmentation (dd) = generation_free_list_space (gen) + generation_free_obj_space (gen); dd_current_size (dd) = total_gen_size - dd_fragmentation (dd); dd_survived_size (dd) = dd_current_size (dd); in = 0; out = dd_current_size (dd); dd_desired_allocation (dd) = desired_new_allocation (dd, out, i, 0); dd_gc_new_allocation (dd) = Align (dd_desired_allocation (dd), get_alignment_constant (FALSE)); dd_new_allocation (dd) = dd_gc_new_allocation (dd); gen_data = &(current_gc_data_per_heap->gen_data[i]); gen_data->size_after = total_gen_size; gen_data->free_list_space_after = generation_free_list_space (gen); gen_data->free_obj_space_after = generation_free_obj_space (gen); gen_data->npinned_surv = out; #ifdef BACKGROUND_GC if (i == loh_generation) end_loh_size = total_gen_size; if (i == poh_generation) end_poh_size = total_gen_size; #endif //BACKGROUND_GC dd_promoted_size (dd) = out; } } } void gc_heap::trim_youngest_desired_low_memory() { if (g_low_memory_status) { size_t committed_mem = committed_size(); dynamic_data* dd = dynamic_data_of (0); size_t current = dd_desired_allocation (dd); size_t candidate = max (Align ((committed_mem / 10), get_alignment_constant(FALSE)), dd_min_size (dd)); dd_desired_allocation (dd) = min (current, candidate); } } ptrdiff_t gc_heap::estimate_gen_growth (int gen_number) { dynamic_data* dd_gen = dynamic_data_of (gen_number); generation *gen = generation_of (gen_number); ptrdiff_t new_allocation_gen = dd_new_allocation (dd_gen); ptrdiff_t free_list_space_gen = generation_free_list_space (gen); #ifdef USE_REGIONS // in the case of regions, we assume all the space up to reserved gets used before we get a new region for this gen ptrdiff_t reserved_not_in_use = 0; ptrdiff_t allocated_gen = 0; for (heap_segment* region = generation_start_segment_rw (gen); region != nullptr; region = heap_segment_next (region)) { allocated_gen += heap_segment_allocated (region) - heap_segment_mem (region); reserved_not_in_use += heap_segment_reserved (region) - heap_segment_allocated (region); } // compute how much of the allocated space is on the free list double free_list_fraction_gen = (allocated_gen == 0) ? 0.0 : (double)(free_list_space_gen) / (double)allocated_gen; // estimate amount of usable free space // e.g. if 90% of the allocated space is free, assume 90% of these 90% can get used // e.g. 
    // if 10% of the allocated space is free, assume 10% of these 10% can get used
    ptrdiff_t usable_free_space = (ptrdiff_t)(free_list_fraction_gen * free_list_space_gen);

    ptrdiff_t budget_gen = new_allocation_gen - usable_free_space - reserved_not_in_use;

    dprintf(1, ("h%2d gen %d budget %8Id allocated: %8Id, FL: %8Id, reserved_not_in_use %8Id budget_gen %8Id",
        heap_number, gen_number, new_allocation_gen, allocated_gen, free_list_space_gen, reserved_not_in_use, budget_gen));

#else //USE_REGIONS
    // estimate how much we are going to need in this generation - estimate half the free list space gets used
    ptrdiff_t budget_gen = new_allocation_gen - (free_list_space_gen / 2);
    dprintf (REGIONS_LOG, ("budget for gen %d on heap %d is %Id (new %Id, free %Id)",
        gen_number, heap_number, budget_gen, new_allocation_gen, free_list_space_gen));
#endif //USE_REGIONS

    return budget_gen;
}

void gc_heap::decommit_ephemeral_segment_pages()
{
    if (settings.concurrent || use_large_pages_p || (settings.pause_mode == pause_no_gc))
    {
        return;
    }

#if defined(MULTIPLE_HEAPS) && defined(USE_REGIONS)
    for (int gen_number = soh_gen0; gen_number <= soh_gen1; gen_number++)
    {
        generation *gen = generation_of (gen_number);
        heap_segment* tail_region = generation_tail_region (gen);
        uint8_t* previous_decommit_target = heap_segment_decommit_target (tail_region);

        // reset the decommit targets to make sure we don't decommit inadvertently
        for (heap_segment* region = generation_start_segment_rw (gen); region != nullptr; region = heap_segment_next (region))
        {
            heap_segment_decommit_target (region) = heap_segment_reserved (region);
        }

        ptrdiff_t budget_gen = estimate_gen_growth (gen_number) + loh_size_threshold;

        if (budget_gen >= 0)
        {
            // we need more than the regions we have - nothing to decommit
            continue;
        }

        // we may have too much committed - let's see if we can decommit in the tail region
        ptrdiff_t tail_region_size = heap_segment_reserved (tail_region) - heap_segment_mem (tail_region);
        ptrdiff_t unneeded_tail_size = min (-budget_gen, tail_region_size);
        uint8_t *decommit_target = heap_segment_reserved (tail_region) - unneeded_tail_size;
        decommit_target = max (decommit_target, heap_segment_allocated (tail_region));

        if (decommit_target < previous_decommit_target)
        {
            // we used to have a higher target - do exponential smoothing by computing
            // essentially decommit_target = 1/3*decommit_target + 2/3*previous_decommit_target
            // computation below is slightly different to avoid overflow
            ptrdiff_t target_decrease = previous_decommit_target - decommit_target;
            decommit_target += target_decrease * 2 / 3;
        }

//#define STRESS_DECOMMIT 1

#ifdef STRESS_DECOMMIT
        // our decommit logic should work for a random decommit target within tail_region - make sure it does
        decommit_target = heap_segment_mem (tail_region) + gc_rand::get_rand (heap_segment_reserved (tail_region) - heap_segment_mem (tail_region));
#endif //STRESS_DECOMMIT

        heap_segment_decommit_target (tail_region) = decommit_target;

        if (decommit_target < heap_segment_committed (tail_region))
        {
            gradual_decommit_in_progress_p = TRUE;

            dprintf (1, ("h%2d gen %d reduce_commit by %IdkB",
                heap_number,
                gen_number,
                (heap_segment_committed (tail_region) - decommit_target)/1024));
        }
        dprintf(3, ("h%2d gen %d allocated: %IdkB committed: %IdkB target: %IdkB",
            heap_number,
            gen_number,
            (heap_segment_allocated (tail_region) - heap_segment_mem (tail_region))/1024,
            (heap_segment_committed (tail_region) - heap_segment_mem (tail_region))/1024,
            (decommit_target - heap_segment_mem (tail_region))/1024));
    }
#else //MULTIPLE_HEAPS &&
USE_REGIONS dynamic_data* dd0 = dynamic_data_of (0); ptrdiff_t desired_allocation = dd_new_allocation (dd0) + max (estimate_gen_growth (soh_gen1), 0) + loh_size_threshold; size_t slack_space = #ifdef HOST_64BIT max(min(min(soh_segment_size/32, dd_max_size (dd0)), (generation_size (max_generation) / 10)), (size_t)desired_allocation); #else #ifdef FEATURE_CORECLR desired_allocation; #else dd_max_size (dd0); #endif //FEATURE_CORECLR #endif // HOST_64BIT uint8_t *decommit_target = heap_segment_allocated (ephemeral_heap_segment) + slack_space; if (decommit_target < heap_segment_decommit_target (ephemeral_heap_segment)) { // we used to have a higher target - do exponential smoothing by computing // essentially decommit_target = 1/3*decommit_target + 2/3*previous_decommit_target // computation below is slightly different to avoid overflow ptrdiff_t target_decrease = heap_segment_decommit_target (ephemeral_heap_segment) - decommit_target; decommit_target += target_decrease * 2 / 3; } heap_segment_decommit_target (ephemeral_heap_segment) = decommit_target; #ifdef MULTIPLE_HEAPS if (decommit_target < heap_segment_committed (ephemeral_heap_segment)) { gradual_decommit_in_progress_p = TRUE; } #ifdef _DEBUG // these are only for checking against logic errors ephemeral_heap_segment->saved_committed = heap_segment_committed (ephemeral_heap_segment); ephemeral_heap_segment->saved_desired_allocation = dd_desired_allocation (dd0); #endif // _DEBUG #endif // MULTIPLE_HEAPS #ifndef MULTIPLE_HEAPS // we want to limit the amount of decommit we do per time to indirectly // limit the amount of time spent in recommit and page faults size_t ephemeral_elapsed = (size_t)((dd_time_clock (dd0) - gc_last_ephemeral_decommit_time) / 1000); gc_last_ephemeral_decommit_time = dd_time_clock (dd0); // this is the amount we were planning to decommit ptrdiff_t decommit_size = heap_segment_committed (ephemeral_heap_segment) - decommit_target; // we do a max of DECOMMIT_SIZE_PER_MILLISECOND per millisecond of elapsed time since the last GC // we limit the elapsed time to 10 seconds to avoid spending too much time decommitting ptrdiff_t max_decommit_size = min (ephemeral_elapsed, (10*1000)) * DECOMMIT_SIZE_PER_MILLISECOND; decommit_size = min (decommit_size, max_decommit_size); slack_space = heap_segment_committed (ephemeral_heap_segment) - heap_segment_allocated (ephemeral_heap_segment) - decommit_size; decommit_heap_segment_pages (ephemeral_heap_segment, slack_space); #endif // !MULTIPLE_HEAPS gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap(); current_gc_data_per_heap->extra_gen0_committed = heap_segment_committed (ephemeral_heap_segment) - heap_segment_allocated (ephemeral_heap_segment); #endif //MULTIPLE_HEAPS && USE_REGIONS } // return true if we actually decommitted anything bool gc_heap::decommit_step () { size_t decommit_size = 0; #ifdef USE_REGIONS const size_t max_decommit_step_size = DECOMMIT_SIZE_PER_MILLISECOND * DECOMMIT_TIME_STEP_MILLISECONDS; for (int kind = basic_free_region; kind < count_free_region_kinds; kind++) { dprintf (REGIONS_LOG, ("decommit_step %d, regions_to_decommit = %Id", kind, global_regions_to_decommit[kind].get_num_free_regions())); while (global_regions_to_decommit[kind].get_num_free_regions() > 0) { heap_segment* region = global_regions_to_decommit[kind].unlink_region_front(); uint8_t* page_start = align_lower_page(get_region_start(region)); uint8_t* end = use_large_pages_p ? 
heap_segment_used(region) : heap_segment_committed(region); size_t size = end - page_start; bool decommit_succeeded_p = false; if (!use_large_pages_p) { decommit_succeeded_p = virtual_decommit(page_start, size, heap_segment_oh(region), 0); dprintf(REGIONS_LOG, ("decommitted region %Ix(%Ix-%Ix) (%Iu bytes) - success: %d", region, page_start, end, size, decommit_succeeded_p)); } if (!decommit_succeeded_p) { memclr(page_start, size); dprintf(REGIONS_LOG, ("cleared region %Ix(%Ix-%Ix) (%Iu bytes)", region, page_start, end, size)); } global_region_allocator.delete_region(get_region_start(region)); decommit_size += size; if (decommit_size >= max_decommit_step_size) { return true; } } } if (use_large_pages_p) { return (decommit_size != 0); } #endif //USE_REGIONS #ifdef MULTIPLE_HEAPS // should never get here for large pages because decommit_ephemeral_segment_pages // will not do anything if use_large_pages_p is true assert(!use_large_pages_p); for (int i = 0; i < n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; decommit_size += hp->decommit_ephemeral_segment_pages_step (); } #endif //MULTIPLE_HEAPS return (decommit_size != 0); } #ifdef MULTIPLE_HEAPS // return the decommitted size size_t gc_heap::decommit_ephemeral_segment_pages_step () { size_t size = 0; #ifdef USE_REGIONS for (int gen_number = soh_gen0; gen_number <= soh_gen1; gen_number++) { generation* gen = generation_of (gen_number); heap_segment* seg = generation_tail_region (gen); #else // USE_REGIONS { heap_segment* seg = ephemeral_heap_segment; // we rely on desired allocation not being changed outside of GC assert (seg->saved_desired_allocation == dd_desired_allocation (dynamic_data_of (0))); #endif // USE_REGIONS uint8_t* decommit_target = heap_segment_decommit_target (seg); size_t EXTRA_SPACE = 2 * OS_PAGE_SIZE; decommit_target += EXTRA_SPACE; #ifdef STRESS_DECOMMIT // our decommit logic should work for a random decommit target within tail_region - make sure it does // tail region now may be different from what decommit_ephemeral_segment_pages saw decommit_target = heap_segment_mem (seg) + gc_rand::get_rand (heap_segment_reserved (seg) - heap_segment_mem (seg)); #endif //STRESS_DECOMMIT uint8_t* committed = heap_segment_committed (seg); uint8_t* allocated = (seg == ephemeral_heap_segment) ? alloc_allocated : heap_segment_allocated (seg); if ((allocated <= decommit_target) && (decommit_target < committed)) { #ifdef USE_REGIONS if (gen_number == soh_gen0) { // for gen 0, sync with the allocator by taking the more space lock // and re-read the variables // // we call try_enter_spin_lock here instead of enter_spin_lock because // calling enter_spin_lock from this thread can deadlock at the start // of a GC - if gc_started is already true, we call wait_for_gc_done(), // but we are on GC thread 0, so GC cannot make progress if (!try_enter_spin_lock (&more_space_lock_soh)) { continue; } add_saved_spinlock_info (false, me_acquire, mt_decommit_step); seg = generation_tail_region (gen); #ifndef STRESS_DECOMMIT decommit_target = heap_segment_decommit_target (seg); decommit_target += EXTRA_SPACE; #endif committed = heap_segment_committed (seg); allocated = (seg == ephemeral_heap_segment) ? 
alloc_allocated : heap_segment_allocated (seg); } if ((allocated <= decommit_target) && (decommit_target < committed)) #else // USE_REGIONS // we rely on other threads not messing with committed if we are about to trim it down assert (seg->saved_committed == heap_segment_committed (seg)); #endif // USE_REGIONS { // how much would we need to decommit to get to decommit_target in one step? size_t full_decommit_size = (committed - decommit_target); // don't do more than max_decommit_step_size per step size_t decommit_size = min (max_decommit_step_size, full_decommit_size); // figure out where the new committed should be uint8_t* new_committed = (committed - decommit_size); size += decommit_heap_segment_pages_worker (seg, new_committed); #ifdef _DEBUG seg->saved_committed = committed - size; #endif // _DEBUG } #ifdef USE_REGIONS if (gen_number == soh_gen0) { // for gen 0, we took the more space lock - leave it again add_saved_spinlock_info (false, me_release, mt_decommit_step); leave_spin_lock (&more_space_lock_soh); } #endif // USE_REGIONS } } return size; } #endif //MULTIPLE_HEAPS //This is meant to be called by decide_on_compacting. size_t gc_heap::generation_fragmentation (generation* gen, generation* consing_gen, uint8_t* end) { ptrdiff_t frag = 0; #ifdef USE_REGIONS for (int gen_num = 0; gen_num <= gen->gen_num; gen_num++) { generation* gen = generation_of (gen_num); heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); while (seg) { frag += (heap_segment_saved_allocated (seg) - heap_segment_plan_allocated (seg)); dprintf (3, ("h%d g%d adding seg plan frag: %Ix-%Ix=%Id -> %Id", heap_number, gen_num, heap_segment_saved_allocated (seg), heap_segment_plan_allocated (seg), (heap_segment_saved_allocated (seg) - heap_segment_plan_allocated (seg)), frag)); seg = heap_segment_next_rw (seg); } } #else //USE_REGIONS uint8_t* alloc = generation_allocation_pointer (consing_gen); // If the allocation pointer has reached the ephemeral segment // fine, otherwise the whole ephemeral segment is considered // fragmentation if (in_range_for_segment (alloc, ephemeral_heap_segment)) { if (alloc <= heap_segment_allocated(ephemeral_heap_segment)) frag = end - alloc; else { // case when no survivors, allocated set to beginning frag = 0; } dprintf (3, ("ephemeral frag: %Id", frag)); } else frag = (heap_segment_allocated (ephemeral_heap_segment) - heap_segment_mem (ephemeral_heap_segment)); heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); PREFIX_ASSUME(seg != NULL); while (seg != ephemeral_heap_segment) { frag += (heap_segment_allocated (seg) - heap_segment_plan_allocated (seg)); dprintf (3, ("seg: %Ix, frag: %Id", (size_t)seg, (heap_segment_allocated (seg) - heap_segment_plan_allocated (seg)))); seg = heap_segment_next_rw (seg); assert (seg); } #endif //USE_REGIONS dprintf (3, ("frag: %Id discounting pinned plugs", frag)); //add the length of the dequeued plug free space size_t bos = 0; while (bos < mark_stack_bos) { frag += (pinned_len (pinned_plug_of (bos))); dprintf (3, ("adding pinned len %Id to frag ->%Id", pinned_len (pinned_plug_of (bos)), frag)); bos++; } return frag; } // for SOH this returns the total sizes of the generation and its // younger generation(s). // for LOH this returns just LOH size. size_t gc_heap::generation_sizes (generation* gen, bool use_saved_p) { size_t result = 0; #ifdef USE_REGIONS int gen_num = gen->gen_num; int start_gen_index = ((gen_num > max_generation) ? 
gen_num : 0); for (int i = start_gen_index; i <= gen_num; i++) { heap_segment* seg = heap_segment_in_range (generation_start_segment (generation_of (i))); while (seg) { uint8_t* end = (use_saved_p ? heap_segment_saved_allocated (seg) : heap_segment_allocated (seg)); result += end - heap_segment_mem (seg); dprintf (3, ("h%d gen%d size + %Id (%Ix - %Ix) -> %Id", heap_number, i, (end - heap_segment_mem (seg)), heap_segment_mem (seg), end, result)); seg = heap_segment_next (seg); } } #else //USE_REGIONS if (generation_start_segment (gen ) == ephemeral_heap_segment) result = (heap_segment_allocated (ephemeral_heap_segment) - generation_allocation_start (gen)); else { heap_segment* seg = heap_segment_in_range (generation_start_segment (gen)); PREFIX_ASSUME(seg != NULL); while (seg) { result += (heap_segment_allocated (seg) - heap_segment_mem (seg)); seg = heap_segment_next_in_range (seg); } } #endif //USE_REGIONS return result; } #ifdef USE_REGIONS bool gc_heap::decide_on_compaction_space() { size_t gen0size = approximate_new_allocation(); dprintf (REGIONS_LOG, ("gen0size: %Id, free: %Id", gen0size, (num_regions_freed_in_sweep * ((size_t)1 << min_segment_size_shr)))); // If we don't compact, would we have enough space? if (sufficient_space_regions ((num_regions_freed_in_sweep * ((size_t)1 << min_segment_size_shr)), gen0size)) { dprintf (REGIONS_LOG, ("it is sufficient!")); return false; } // If we do compact, would we have enough space? get_gen0_end_plan_space(); if (!gen0_large_chunk_found) { gen0_large_chunk_found = (free_regions[basic_free_region].get_num_free_regions() > 0); } dprintf (REGIONS_LOG, ("gen0_pinned_free_space: %Id, end_gen0_region_space: %Id, gen0size: %Id", gen0_pinned_free_space, end_gen0_region_space, gen0size)); if (sufficient_space_regions ((gen0_pinned_free_space + end_gen0_region_space), gen0size) && gen0_large_chunk_found) { sufficient_gen0_space_p = TRUE; } return true; } #endif //USE_REGIONS size_t gc_heap::estimated_reclaim (int gen_number) { dynamic_data* dd = dynamic_data_of (gen_number); size_t gen_allocated = (dd_desired_allocation (dd) - dd_new_allocation (dd)); size_t gen_total_size = gen_allocated + dd_current_size (dd); size_t est_gen_surv = (size_t)((float) (gen_total_size) * dd_surv (dd)); size_t est_gen_free = gen_total_size - est_gen_surv + dd_fragmentation (dd); dprintf (GTC_LOG, ("h%d gen%d total size: %Id, est dead space: %Id (s: %d, allocated: %Id), frag: %Id", heap_number, gen_number, gen_total_size, est_gen_free, (int)(dd_surv (dd) * 100), gen_allocated, dd_fragmentation (dd))); return est_gen_free; } bool gc_heap::is_full_compacting_gc_productive() { #ifdef USE_REGIONS // If we needed to grow gen2 by extending either the end of its tail region // or having to acquire more regions for gen2, then we view this as unproductive. // // Note that when we freely choose which region to demote and promote, this calculation // will need to change. 
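    // The two checks below map to the two kinds of gen2 growth described above: the first
    // gen1 region being planned into gen2 means gen2 had to acquire another region, and the
    // tail region's plan allocated moving past its current allocated means the end of gen2's
    // tail region was extended.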
heap_segment* gen1_start_region = generation_start_segment (generation_of (max_generation - 1)); if (heap_segment_plan_gen_num (gen1_start_region) == max_generation) { dprintf (REGIONS_LOG, ("gen1 start region %Ix is now part of gen2, unproductive", heap_segment_mem (gen1_start_region))); return false; } else { heap_segment* gen2_tail_region = generation_tail_region (generation_of (max_generation)); if (heap_segment_plan_allocated (gen2_tail_region) >= heap_segment_allocated (gen2_tail_region)) { dprintf (REGIONS_LOG, ("last gen2 region extended %Ix->%Ix, unproductive", heap_segment_allocated (gen2_tail_region), heap_segment_plan_allocated (gen2_tail_region))); return false; } } return true; #else //USE_REGIONS if (generation_plan_allocation_start (generation_of (max_generation - 1)) >= generation_allocation_start (generation_of (max_generation - 1))) { dprintf (1, ("gen1 start %Ix->%Ix, gen2 size %Id->%Id, lock elevation", generation_allocation_start (generation_of (max_generation - 1)), generation_plan_allocation_start (generation_of (max_generation - 1)), generation_size (max_generation), generation_plan_size (max_generation))); return false; } else return true; #endif //USE_REGIONS } BOOL gc_heap::decide_on_compacting (int condemned_gen_number, size_t fragmentation, BOOL& should_expand) { BOOL should_compact = FALSE; should_expand = FALSE; generation* gen = generation_of (condemned_gen_number); dynamic_data* dd = dynamic_data_of (condemned_gen_number); size_t gen_sizes = generation_sizes(gen, true); float fragmentation_burden = ( ((0 == fragmentation) || (0 == gen_sizes)) ? (0.0f) : (float (fragmentation) / gen_sizes) ); dprintf (GTC_LOG, ("h%d g%d fragmentation: %Id (%d%%), gen_sizes: %Id", heap_number, settings.condemned_generation, fragmentation, (int)(fragmentation_burden * 100.0), gen_sizes)); #ifdef USE_REGIONS if (special_sweep_p) { last_gc_before_oom = FALSE; return FALSE; } #endif //USE_REGIONS #if defined(STRESS_HEAP) && !defined(FEATURE_REDHAWK) // for GC stress runs we need compaction if (GCStress<cfg_any>::IsEnabled() && !settings.concurrent) should_compact = TRUE; #endif //defined(STRESS_HEAP) && !defined(FEATURE_REDHAWK) if (GCConfig::GetForceCompact()) should_compact = TRUE; if ((condemned_gen_number == max_generation) && last_gc_before_oom) { should_compact = TRUE; last_gc_before_oom = FALSE; get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_last_gc); } if (settings.reason == reason_induced_compacting) { dprintf (2, ("induced compacting GC")); should_compact = TRUE; get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_induced_compacting); } if (settings.reason == reason_pm_full_gc) { assert (condemned_gen_number == max_generation); if (heap_number == 0) { dprintf (GTC_LOG, ("PM doing compacting full GC after a gen1")); } should_compact = TRUE; } dprintf (2, ("Fragmentation: %d Fragmentation burden %d%%", fragmentation, (int) (100*fragmentation_burden))); if (provisional_mode_triggered && (condemned_gen_number == (max_generation - 1))) { dprintf (GTC_LOG, ("gen1 in PM always compact")); should_compact = TRUE; } #ifdef USE_REGIONS if (!should_compact) { should_compact = !!decide_on_compaction_space(); } #else //USE_REGIONS if (!should_compact) { if (dt_low_ephemeral_space_p (tuning_deciding_compaction)) { dprintf(GTC_LOG, ("compacting due to low ephemeral")); should_compact = TRUE; get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_low_ephemeral); } } if (should_compact) { if ((condemned_gen_number >= (max_generation - 1))) { if 
(dt_low_ephemeral_space_p (tuning_deciding_expansion)) { dprintf (GTC_LOG,("Not enough space for all ephemeral generations with compaction")); should_expand = TRUE; } } } #endif //USE_REGIONS #ifdef HOST_64BIT BOOL high_memory = FALSE; #endif // HOST_64BIT if (!should_compact) { // We are not putting this in dt_high_frag_p because it's not exactly // high fragmentation - it's just enough planned fragmentation for us to // want to compact. Also the "fragmentation" we are talking about here // is different from anywhere else. dprintf (REGIONS_LOG, ("frag: %Id, fragmentation_burden: %.3f", fragmentation, fragmentation_burden)); BOOL frag_exceeded = ((fragmentation >= dd_fragmentation_limit (dd)) && (fragmentation_burden >= dd_fragmentation_burden_limit (dd))); if (frag_exceeded) { #ifdef BACKGROUND_GC // do not force compaction if this was a stress-induced GC IN_STRESS_HEAP(if (!settings.stress_induced)) { #endif // BACKGROUND_GC assert (settings.concurrent == FALSE); should_compact = TRUE; get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_high_frag); #ifdef BACKGROUND_GC } #endif // BACKGROUND_GC } #ifdef HOST_64BIT // check for high memory situation if(!should_compact) { uint32_t num_heaps = 1; #ifdef MULTIPLE_HEAPS num_heaps = gc_heap::n_heaps; #endif // MULTIPLE_HEAPS ptrdiff_t reclaim_space = generation_size(max_generation) - generation_plan_size(max_generation); if((settings.entry_memory_load >= high_memory_load_th) && (settings.entry_memory_load < v_high_memory_load_th)) { if(reclaim_space > (int64_t)(min_high_fragmentation_threshold (entry_available_physical_mem, num_heaps))) { dprintf(GTC_LOG,("compacting due to fragmentation in high memory")); should_compact = TRUE; get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_high_mem_frag); } high_memory = TRUE; } else if(settings.entry_memory_load >= v_high_memory_load_th) { if(reclaim_space > (ptrdiff_t)(min_reclaim_fragmentation_threshold (num_heaps))) { dprintf(GTC_LOG,("compacting due to fragmentation in very high memory")); should_compact = TRUE; get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_vhigh_mem_frag); } high_memory = TRUE; } } #endif // HOST_64BIT } // The purpose of calling ensure_gap_allocation here is to make sure // that we actually are able to commit the memory to allocate generation // starts. if ((should_compact == FALSE) && (ensure_gap_allocation (condemned_gen_number) == FALSE)) { should_compact = TRUE; get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_no_gaps); } if (settings.condemned_generation == max_generation) { //check the progress if ( #ifdef HOST_64BIT (high_memory && !should_compact) || #endif // HOST_64BIT !is_full_compacting_gc_productive()) { //no progress -> lock settings.should_lock_elevation = TRUE; } } if (settings.pause_mode == pause_no_gc) { should_compact = TRUE; if ((size_t)(heap_segment_reserved (ephemeral_heap_segment) - heap_segment_plan_allocated (ephemeral_heap_segment)) < soh_allocation_no_gc) { should_expand = TRUE; } } dprintf (2, ("will %s(%s)", (should_compact ? "compact" : "sweep"), (should_expand ? 
"ex" : ""))); return should_compact; } size_t align_lower_good_size_allocation (size_t size) { return (size/64)*64; } size_t gc_heap::approximate_new_allocation() { dynamic_data* dd0 = dynamic_data_of (0); return max (2*dd_min_size (dd0), ((dd_desired_allocation (dd0)*2)/3)); } bool gc_heap::check_against_hard_limit (size_t space_required) { bool can_fit = TRUE; // If hard limit is specified, and if we attributed all that's left in commit to the ephemeral seg // so we treat that as segment end, do we have enough space. if (heap_hard_limit) { size_t left_in_commit = heap_hard_limit - current_total_committed; int num_heaps = get_num_heaps(); left_in_commit /= num_heaps; if (left_in_commit < space_required) { can_fit = FALSE; } dprintf (2, ("h%d end seg %Id, but only %Id left in HARD LIMIT commit, required: %Id %s on eph", heap_number, space_required, left_in_commit, space_required, (can_fit ? "ok" : "short"))); } return can_fit; } #ifdef USE_REGIONS bool gc_heap::sufficient_space_regions (size_t end_space, size_t end_space_required) { // REGIONS PERF TODO: we can repurpose large regions here too, if needed. size_t free_regions_space = (free_regions[basic_free_region].get_num_free_regions() * ((size_t)1 << min_segment_size_shr)) + global_region_allocator.get_free(); size_t total_alloc_space = end_space + free_regions_space; dprintf (REGIONS_LOG, ("h%d required %Id, end %Id + free %Id=%Id", heap_number, end_space_required, end_space, free_regions_space, total_alloc_space)); if (total_alloc_space > end_space_required) { return check_against_hard_limit (end_space_required); } else return false; } #else //USE_REGIONS BOOL gc_heap::sufficient_space_end_seg (uint8_t* start, uint8_t* committed, uint8_t* reserved, size_t end_space_required) { BOOL can_fit = FALSE; size_t committed_space = (size_t)(committed - start); size_t end_seg_space = (size_t)(reserved - start); if (committed_space > end_space_required) { return true; } else if (end_seg_space > end_space_required) { return check_against_hard_limit (end_space_required - committed_space); } else return false; } #endif //USE_REGIONS // After we did a GC we expect to have at least this // much space at the end of the segment to satisfy // a reasonable amount of allocation requests. size_t gc_heap::end_space_after_gc() { return max ((dd_min_size (dynamic_data_of (0))/2), (END_SPACE_AFTER_GC_FL)); } BOOL gc_heap::ephemeral_gen_fit_p (gc_tuning_point tp) { uint8_t* start = 0; #ifdef USE_REGIONS assert ((tp == tuning_deciding_condemned_gen) || (tp == tuning_deciding_full_gc)); #else//USE_REGIONS if ((tp == tuning_deciding_condemned_gen) || (tp == tuning_deciding_compaction)) { start = (settings.concurrent ? 
alloc_allocated : heap_segment_allocated (ephemeral_heap_segment)); if (settings.concurrent) { dprintf (2, ("%Id left at the end of ephemeral segment (alloc_allocated)", (size_t)(heap_segment_reserved (ephemeral_heap_segment) - alloc_allocated))); } else { dprintf (2, ("%Id left at the end of ephemeral segment (allocated)", (size_t)(heap_segment_reserved (ephemeral_heap_segment) - heap_segment_allocated (ephemeral_heap_segment)))); } } else if (tp == tuning_deciding_expansion) { start = heap_segment_plan_allocated (ephemeral_heap_segment); dprintf (2, ("%Id left at the end of ephemeral segment based on plan", (size_t)(heap_segment_reserved (ephemeral_heap_segment) - start))); } else { assert (tp == tuning_deciding_full_gc); dprintf (2, ("FGC: %Id left at the end of ephemeral segment (alloc_allocated)", (size_t)(heap_segment_reserved (ephemeral_heap_segment) - alloc_allocated))); start = alloc_allocated; } if (start == 0) // empty ephemeral generations { assert (tp == tuning_deciding_expansion); // if there are no survivors in the ephemeral segment, // this should be the beginning of ephemeral segment. start = generation_allocation_pointer (generation_of (max_generation)); assert (start == heap_segment_mem (ephemeral_heap_segment)); } if (tp == tuning_deciding_expansion) { assert (settings.condemned_generation >= (max_generation-1)); size_t gen0size = approximate_new_allocation(); size_t eph_size = gen0size; size_t gen_min_sizes = 0; for (int j = 1; j <= max_generation-1; j++) { gen_min_sizes += 2*dd_min_size (dynamic_data_of(j)); } eph_size += gen_min_sizes; dprintf (3, ("h%d deciding on expansion, need %Id (gen0: %Id, 2*min: %Id)", heap_number, gen0size, gen_min_sizes, eph_size)); // We must find room for one large object and enough room for gen0size if ((size_t)(heap_segment_reserved (ephemeral_heap_segment) - start) > eph_size) { dprintf (3, ("Enough room before end of segment")); return TRUE; } else { size_t room = align_lower_good_size_allocation (heap_segment_reserved (ephemeral_heap_segment) - start); size_t end_seg = room; //look at the plug free space size_t largest_alloc = END_SPACE_AFTER_GC_FL; bool large_chunk_found = FALSE; size_t bos = 0; uint8_t* gen0start = generation_plan_allocation_start (youngest_generation); dprintf (3, ("ephemeral_gen_fit_p: gen0 plan start: %Ix", (size_t)gen0start)); if (gen0start == 0) return FALSE; dprintf (3, ("ephemeral_gen_fit_p: room before free list search %Id, needed: %Id", room, gen0size)); while ((bos < mark_stack_bos) && !((room >= gen0size) && large_chunk_found)) { uint8_t* plug = pinned_plug (pinned_plug_of (bos)); if (in_range_for_segment (plug, ephemeral_heap_segment)) { if (plug >= gen0start) { size_t chunk = align_lower_good_size_allocation (pinned_len (pinned_plug_of (bos))); room += chunk; if (!large_chunk_found) { large_chunk_found = (chunk >= largest_alloc); } dprintf (3, ("ephemeral_gen_fit_p: room now %Id, large chunk: %Id", room, large_chunk_found)); } } bos++; } if (room >= gen0size) { if (large_chunk_found) { sufficient_gen0_space_p = TRUE; dprintf (3, ("Enough room")); return TRUE; } else { // now we need to find largest_alloc at the end of the segment. 
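                // end_seg is the aligned space between the planned end of the ephemeral
                // segment and its reserved end, captured before the pinned plug gaps were
                // added to room; since no single gap was >= largest_alloc, that end-of-segment
                // space has to cover end_space_after_gc() on its own.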
if (end_seg >= end_space_after_gc()) { dprintf (3, ("Enough room (may need end of seg)")); return TRUE; } } } dprintf (3, ("Not enough room")); return FALSE; } } else #endif //USE_REGIONS { size_t end_space = 0; dynamic_data* dd = dynamic_data_of (0); if ((tp == tuning_deciding_condemned_gen) || (tp == tuning_deciding_full_gc)) { end_space = max (2*dd_min_size (dd), end_space_after_gc()); } else { assert (tp == tuning_deciding_compaction); end_space = approximate_new_allocation(); } #ifdef USE_REGIONS size_t gen0_end_space = get_gen0_end_space(); BOOL can_fit = sufficient_space_regions (gen0_end_space, end_space); #else //USE_REGIONS BOOL can_fit = sufficient_space_end_seg (start, heap_segment_committed (ephemeral_heap_segment), heap_segment_reserved (ephemeral_heap_segment), end_space); #endif //USE_REGIONS return can_fit; } } CObjectHeader* gc_heap::allocate_uoh_object (size_t jsize, uint32_t flags, int gen_number, int64_t& alloc_bytes) { //create a new alloc context because gen3context is shared. alloc_context acontext; acontext.init(); #if HOST_64BIT size_t maxObjectSize = (INT64_MAX - 7 - Align(min_obj_size)); #else size_t maxObjectSize = (INT32_MAX - 7 - Align(min_obj_size)); #endif if (jsize >= maxObjectSize) { if (GCConfig::GetBreakOnOOM()) { GCToOSInterface::DebugBreak(); } return NULL; } size_t size = AlignQword (jsize); int align_const = get_alignment_constant (FALSE); size_t pad = 0; #ifdef FEATURE_LOH_COMPACTION if (gen_number == loh_generation) { pad = Align (loh_padding_obj_size, align_const); } #endif //FEATURE_LOH_COMPACTION assert (size >= Align (min_obj_size, align_const)); #ifdef _MSC_VER #pragma inline_depth(0) #endif //_MSC_VER if (! allocate_more_space (&acontext, (size + pad), flags, gen_number)) { return 0; } #ifdef _MSC_VER #pragma inline_depth(20) #endif //_MSC_VER #ifdef FEATURE_LOH_COMPACTION // The GC allocator made a free object already in this alloc context and // adjusted the alloc_ptr accordingly. #endif //FEATURE_LOH_COMPACTION uint8_t* result = acontext.alloc_ptr; assert ((size_t)(acontext.alloc_limit - acontext.alloc_ptr) == size); alloc_bytes += size; CObjectHeader* obj = (CObjectHeader*)result; #ifdef BACKGROUND_GC if (gc_heap::background_running_p()) { uint8_t* current_lowest_address = background_saved_lowest_address; uint8_t* current_highest_address = background_saved_highest_address; if ((result < current_highest_address) && (result >= current_lowest_address)) { dprintf (3, ("Clearing mark bit at address %Ix", (size_t)(&mark_array [mark_word_of (result)]))); mark_array_clear_marked (result); } if (current_c_gc_state != c_gc_state_free) { dprintf (3, ("Concurrent allocation of a large object %Ix", (size_t)obj)); //mark the new block specially so we know it is a new object if ((result < current_highest_address) && (result >= current_lowest_address)) { #ifdef DOUBLY_LINKED_FL heap_segment* seg = seg_mapping_table_segment_of (result); // if bgc_allocated is 0 it means it was allocated during bgc sweep, // and since sweep does not look at this seg we cannot set the mark array bit. 
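                // When bgc_allocated is nonzero the sweep will visit this segment, so we do
                // set the mark array bit here - presumably so the in-flight background GC
                // treats the freshly allocated object as live rather than sweeping it.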
uint8_t* background_allocated = heap_segment_background_allocated(seg); if (background_allocated != 0) #endif //DOUBLY_LINKED_FL { dprintf(3, ("Setting mark bit at address %Ix", (size_t)(&mark_array[mark_word_of(result)]))); mark_array_set_marked(result); } } } } #endif //BACKGROUND_GC assert (obj != 0); assert ((size_t)obj == Align ((size_t)obj, align_const)); return obj; } void reset_memory (uint8_t* o, size_t sizeo) { if (gc_heap::use_large_pages_p) return; if (sizeo > 128 * 1024) { // We cannot reset the memory for the useful part of a free object. size_t size_to_skip = min_free_list - plug_skew; size_t page_start = align_on_page ((size_t)(o + size_to_skip)); size_t size = align_lower_page ((size_t)o + sizeo - size_to_skip - plug_skew) - page_start; // Note we need to compensate for an OS bug here. This bug would cause the MEM_RESET to fail // on write watched memory. if (reset_mm_p && gc_heap::dt_high_memory_load_p()) { #ifdef MULTIPLE_HEAPS bool unlock_p = true; #else // We don't do unlock because there could be many processes using workstation GC and it's // bad perf to have many threads doing unlock at the same time. bool unlock_p = false; #endif //MULTIPLE_HEAPS reset_mm_p = GCToOSInterface::VirtualReset((void*)page_start, size, unlock_p); } } } BOOL gc_heap::uoh_object_marked (uint8_t* o, BOOL clearp) { BOOL m = FALSE; // It shouldn't be necessary to do these comparisons because this is only used for blocking // GCs and LOH segments cannot be out of range. if ((o >= lowest_address) && (o < highest_address)) { if (marked (o)) { if (clearp) { clear_marked (o); if (pinned (o)) clear_pinned(o); } m = TRUE; } else m = FALSE; } else m = TRUE; return m; } void gc_heap::walk_survivors_relocation (void* profiling_context, record_surv_fn fn) { // Now walk the portion of memory that is actually being relocated. walk_relocation (profiling_context, fn); #ifdef FEATURE_LOH_COMPACTION if (loh_compacted_p) { walk_relocation_for_loh (profiling_context, fn); } #endif //FEATURE_LOH_COMPACTION } void gc_heap::walk_survivors_for_uoh (void* profiling_context, record_surv_fn fn, int gen_number) { generation* gen = generation_of (gen_number); heap_segment* seg = heap_segment_rw (generation_start_segment (gen));; PREFIX_ASSUME(seg != NULL); uint8_t* o = get_uoh_start_object (seg, gen); uint8_t* plug_end = o; uint8_t* plug_start = o; while (1) { if (o >= heap_segment_allocated (seg)) { seg = heap_segment_next (seg); if (seg == 0) break; else o = heap_segment_mem (seg); } if (uoh_object_marked(o, FALSE)) { plug_start = o; BOOL m = TRUE; while (m) { o = o + AlignQword (size (o)); if (o >= heap_segment_allocated (seg)) { break; } m = uoh_object_marked (o, FALSE); } plug_end = o; fn (plug_start, plug_end, 0, profiling_context, false, false); } else { while (o < heap_segment_allocated (seg) && !uoh_object_marked(o, FALSE)) { o = o + AlignQword (size (o)); } } } } #ifdef BACKGROUND_GC BOOL gc_heap::background_object_marked (uint8_t* o, BOOL clearp) { BOOL m = FALSE; if ((o >= background_saved_lowest_address) && (o < background_saved_highest_address)) { if (mark_array_marked (o)) { if (clearp) { mark_array_clear_marked (o); //dprintf (3, ("mark array bit for object %Ix is cleared", o)); dprintf (3, ("CM: %Ix", o)); } m = TRUE; } else m = FALSE; } else m = TRUE; dprintf (3, ("o %Ix(%d) %s", o, size(o), (m ? 
"was bm" : "was NOT bm"))); return m; } void gc_heap::background_delay_delete_uoh_segments() { for (int i = uoh_start_generation; i < total_generation_count; i++) { generation* gen = generation_of (i); heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); heap_segment* prev_seg = 0; #ifdef USE_REGIONS heap_segment* first_remaining_region = 0; #endif //USE_REGIONS while (seg) { heap_segment* next_seg = heap_segment_next (seg); if (seg->flags & heap_segment_flags_uoh_delete) { dprintf (3, ("deleting %Ix-%Ix-%Ix", (size_t)seg, heap_segment_allocated (seg), heap_segment_reserved (seg))); delete_heap_segment (seg, (GCConfig::GetRetainVM() != 0)); heap_segment_next (prev_seg) = next_seg; #ifdef USE_REGIONS update_start_tail_regions (gen, seg, prev_seg, next_seg); #endif //USE_REGIONS } else { #ifdef USE_REGIONS if (!first_remaining_region) first_remaining_region = seg; #endif //USE_REGIONS prev_seg = seg; } seg = next_seg; } #ifdef USE_REGIONS assert (heap_segment_rw (generation_start_segment (gen)) == generation_start_segment (gen)); if (generation_start_segment (gen) != first_remaining_region) { dprintf (REGIONS_LOG, ("h%d gen%d start %Ix -> %Ix", heap_number, gen->gen_num, heap_segment_mem (generation_start_segment (gen)), heap_segment_mem (first_remaining_region))); generation_start_segment (gen) = first_remaining_region; } if (generation_tail_region (gen) != prev_seg) { dprintf (REGIONS_LOG, ("h%d gen%d start %Ix -> %Ix", heap_number, gen->gen_num, heap_segment_mem (generation_tail_region (gen)), heap_segment_mem (prev_seg))); generation_tail_region (gen) = prev_seg; } #endif //USE_REGIONS } } uint8_t* gc_heap::background_next_end (heap_segment* seg, BOOL uoh_objects_p) { return (uoh_objects_p ? heap_segment_allocated (seg) : heap_segment_background_allocated (seg)); } void gc_heap::set_mem_verify (uint8_t* start, uint8_t* end, uint8_t b) { #ifdef VERIFY_HEAP if (end > start) { if ((GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) && !(GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_NO_MEM_FILL)) { dprintf (3, ("setting mem to %c [%Ix, [%Ix", b, start, end)); memset (start, b, (end - start)); } } #endif //VERIFY_HEAP } void gc_heap::generation_delete_heap_segment (generation* gen, heap_segment* seg, heap_segment* prev_seg, heap_segment* next_seg) { dprintf (3, ("bgc sweep: deleting seg %Ix(%Ix), next %Ix(%Ix), prev %Ix(%Ix)", (size_t)seg, heap_segment_mem (seg), (size_t)next_seg, (next_seg ? heap_segment_mem (next_seg) : 0), (size_t)prev_seg, (prev_seg ? heap_segment_mem (prev_seg) : 0))); if (gen->gen_num > max_generation) { dprintf (3, ("Preparing empty large segment %Ix for deletion", (size_t)seg)); // We cannot thread segs in here onto freeable_uoh_segment because // grow_brick_card_tables could be committing mark array which needs to read // the seg list. So we delay it till next time we suspend EE. seg->flags |= heap_segment_flags_uoh_delete; // Since we will be decommitting the seg, we need to prevent heap verification // to verify this segment. 
heap_segment_allocated (seg) = heap_segment_mem (seg); } else { assert (seg != ephemeral_heap_segment); #ifdef DOUBLY_LINKED_FL // For doubly linked list we go forward for SOH heap_segment_next (prev_seg) = next_seg; #else //DOUBLY_LINKED_FL heap_segment_next (next_seg) = prev_seg; #endif //DOUBLY_LINKED_FL dprintf (3, ("Preparing empty small segment %Ix for deletion", (size_t)seg)); heap_segment_next (seg) = freeable_soh_segment; freeable_soh_segment = seg; #ifdef USE_REGIONS #ifdef DOUBLY_LINKED_FL heap_segment* next_region = next_seg; heap_segment* prev_region = prev_seg; #else //DOUBLY_LINKED_FL heap_segment* next_region = prev_seg; heap_segment* prev_region = next_seg; #endif //DOUBLY_LINKED_FL update_start_tail_regions (gen, seg, prev_region, next_region); #endif //USE_REGIONS } decommit_heap_segment (seg); seg->flags |= heap_segment_flags_decommitted; set_mem_verify (heap_segment_allocated (seg) - plug_skew, heap_segment_used (seg), 0xbb); } void gc_heap::process_background_segment_end (heap_segment* seg, generation* gen, uint8_t* last_plug_end, heap_segment* start_seg, BOOL* delete_p, size_t free_obj_size_last_gap) { *delete_p = FALSE; uint8_t* allocated = heap_segment_allocated (seg); uint8_t* background_allocated = heap_segment_background_allocated (seg); BOOL uoh_p = heap_segment_uoh_p (seg); dprintf (3, ("EoS [%Ix, %Ix[(%Ix[), last: %Ix(%Id)", (size_t)heap_segment_mem (seg), background_allocated, allocated, last_plug_end, free_obj_size_last_gap)); if (!uoh_p && (allocated != background_allocated)) { assert (gen->gen_num <= max_generation); dprintf (3, ("Make a free object before newly promoted objects [%Ix, %Ix[", (size_t)last_plug_end, background_allocated)); size_t last_gap = background_allocated - last_plug_end; if (last_gap > 0) { thread_gap (last_plug_end, last_gap, generation_of (max_generation)); add_gen_free (max_generation, last_gap); fix_brick_to_highest (last_plug_end, background_allocated); // When we allowed fgc's during going through gaps, we could have erased the brick // that corresponds to bgc_allocated 'cause we had to update the brick there, // recover it here. fix_brick_to_highest (background_allocated, background_allocated); } } else { // by default, if allocated == background_allocated, it can't // be the ephemeral segment. if (seg == ephemeral_heap_segment) { FATAL_GC_ERROR(); } #ifndef USE_REGIONS if (allocated == heap_segment_mem (seg)) { // this can happen with UOH segments when multiple threads // allocate new segments and not all of them were needed to // satisfy allocation requests. assert (gen->gen_num > max_generation); } #endif //!USE_REGIONS if (last_plug_end == heap_segment_mem (seg)) { // REGIONS TODO: start_seg doesn't matter for regions. We can get rid of it too. // Just need to update the start segment accordingly in generation_delete_heap_segment. // Also this might leave us with no regions at all for gen2 and we should be prepared // for that. One approach is to ensure at least one region per generation at the beginning // of a GC. if (seg != start_seg) { *delete_p = TRUE; } dprintf (3, ("h%d seg %Ix %s be deleted", heap_number, heap_segment_mem (seg), (*delete_p ? 
"should" : "should not"))); } if (!*delete_p) { dprintf (3, ("[h%d] seg %Ix alloc %Ix->%Ix", heap_number, (size_t)seg, heap_segment_allocated (seg), (size_t)last_plug_end)); heap_segment_allocated (seg) = last_plug_end; set_mem_verify (heap_segment_allocated (seg) - plug_skew, heap_segment_used (seg), 0xbb); decommit_heap_segment_pages (seg, 0); } } if (free_obj_size_last_gap) { generation_free_obj_space (gen) -= free_obj_size_last_gap; dprintf (2, ("[h%d] PS: gen2FO-: %Id->%Id", heap_number, free_obj_size_last_gap, generation_free_obj_space (gen))); } dprintf (3, ("verifying seg %Ix's mark array was completely cleared", seg)); bgc_verify_mark_array_cleared (seg); } inline BOOL gc_heap::fgc_should_consider_object (uint8_t* o, heap_segment* seg, BOOL consider_bgc_mark_p, BOOL check_current_sweep_p, BOOL check_saved_sweep_p) { #ifdef USE_REGIONS assert (!check_saved_sweep_p); #endif //USE_REGIONS // the logic for this function must be kept in sync with the analogous function // in ToolBox\SOS\Strike\gc.cpp // TRUE means we don't need to check the bgc mark bit // FALSE means we do. BOOL no_bgc_mark_p = FALSE; if (consider_bgc_mark_p) { if (check_current_sweep_p && (o < current_sweep_pos)) { dprintf (3, ("no bgc mark - o: %Ix < cs: %Ix", o, current_sweep_pos)); no_bgc_mark_p = TRUE; } if (!no_bgc_mark_p) { #ifndef USE_REGIONS if(check_saved_sweep_p && (o >= saved_sweep_ephemeral_start)) { dprintf (3, ("no bgc mark - o: %Ix >= ss: %Ix", o, saved_sweep_ephemeral_start)); no_bgc_mark_p = TRUE; } #endif //!USE_REGIONS if (!check_saved_sweep_p) { uint8_t* background_allocated = heap_segment_background_allocated (seg); #ifndef USE_REGIONS // if this was the saved ephemeral segment, check_saved_sweep_p // would've been true. assert (heap_segment_background_allocated (seg) != saved_sweep_ephemeral_start); #endif //!USE_REGIONS // background_allocated could be 0 for the new segments acquired during bgc // sweep and we still want no_bgc_mark_p to be true. if (o >= background_allocated) { dprintf (3, ("no bgc mark - o: %Ix >= ba: %Ix", o, background_allocated)); no_bgc_mark_p = TRUE; } } } } else { no_bgc_mark_p = TRUE; } dprintf (3, ("bgc mark %Ix: %s (bm: %s)", o, (no_bgc_mark_p ? "no" : "yes"), ((no_bgc_mark_p || background_object_marked (o, FALSE)) ? "yes" : "no"))); return (no_bgc_mark_p ? TRUE : background_object_marked (o, FALSE)); } // consider_bgc_mark_p tells you if you need to care about the bgc mark bit at all // if it's TRUE, check_current_sweep_p tells you if you should consider the // current sweep position or not. void gc_heap::should_check_bgc_mark (heap_segment* seg, BOOL* consider_bgc_mark_p, BOOL* check_current_sweep_p, BOOL* check_saved_sweep_p) { // the logic for this function must be kept in sync with the analogous function // in ToolBox\SOS\Strike\gc.cpp *consider_bgc_mark_p = FALSE; *check_current_sweep_p = FALSE; *check_saved_sweep_p = FALSE; if (current_c_gc_state == c_gc_state_planning) { // We are doing the current_sweep_pos comparison here because we have yet to // turn on the swept flag for the segment but in_range_for_segment will return // FALSE if the address is the same as reserved. 
if ((seg->flags & heap_segment_flags_swept) || (current_sweep_pos == heap_segment_reserved (seg))) { dprintf (3, ("seg %Ix is already swept by bgc", seg)); } else if (heap_segment_background_allocated (seg) == 0) { dprintf (3, ("seg %Ix newly alloc during bgc")); } else { *consider_bgc_mark_p = TRUE; dprintf (3, ("seg %Ix hasn't been swept by bgc", seg)); #ifndef USE_REGIONS if (seg == saved_sweep_ephemeral_seg) { dprintf (3, ("seg %Ix is the saved ephemeral seg", seg)); *check_saved_sweep_p = TRUE; } #endif //!USE_REGIONS if (in_range_for_segment (current_sweep_pos, seg)) { dprintf (3, ("current sweep pos is %Ix and within seg %Ix", current_sweep_pos, seg)); *check_current_sweep_p = TRUE; } } } } // REGIONS TODO: I'm not releasing any empty ephemeral regions here the gen0 allocator is // iterating over these regions. We'd want to do the same as what we do with LOH segs/regions. void gc_heap::background_ephemeral_sweep() { dprintf (3, ("bgc ephemeral sweep")); int align_const = get_alignment_constant (TRUE); #ifndef USE_REGIONS saved_sweep_ephemeral_seg = ephemeral_heap_segment; saved_sweep_ephemeral_start = generation_allocation_start (generation_of (max_generation - 1)); #endif //!USE_REGIONS // Since we don't want to interfere with gen0 allocation while we are threading gen0 free list, // we thread onto a list first then publish it when we are done. allocator youngest_free_list; size_t youngest_free_list_space = 0; size_t youngest_free_obj_space = 0; youngest_free_list.clear(); for (int i = 0; i <= (max_generation - 1); i++) { generation* gen_to_reset = generation_of (i); assert (generation_free_list_space (gen_to_reset) == 0); // Can only assert free_list_space is 0, not free_obj_space as the allocator could have added // something there. } for (int i = (max_generation - 1); i >= 0; i--) { generation* current_gen = generation_of (i); #ifdef USE_REGIONS heap_segment* ephemeral_region = heap_segment_rw (generation_start_segment (current_gen)); while (ephemeral_region) #endif //USE_REGIONS { #ifdef USE_REGIONS uint8_t* o = heap_segment_mem (ephemeral_region); uint8_t* end = heap_segment_background_allocated (ephemeral_region); dprintf (3, ("bgc eph: gen%d seg %Ix(%Ix-%Ix)", heap_segment_gen_num (ephemeral_region), heap_segment_mem (ephemeral_region), heap_segment_allocated (ephemeral_region), heap_segment_background_allocated (ephemeral_region))); // This doesn't conflict with the allocator getting a new region in gen0. // If the allocator just threaded a region onto the gen0 region list we will // read that region and detect that its background allocated is 0. if (!end) { ephemeral_region->flags |= heap_segment_flags_swept; ephemeral_region = heap_segment_next (ephemeral_region); continue; } #else //USE_REGIONS uint8_t* o = generation_allocation_start (current_gen); //Skip the generation gap object o = o + Align(size (o), align_const); uint8_t* end = ((i > 0) ? 
generation_allocation_start (generation_of (i - 1)) : heap_segment_allocated (ephemeral_heap_segment)); #endif //USE_REGIONS uint8_t* plug_end = o; uint8_t* plug_start = o; BOOL marked_p = FALSE; while (o < end) { marked_p = background_object_marked (o, TRUE); if (marked_p) { plug_start = o; size_t plug_size = plug_start - plug_end; if (i >= 1) { thread_gap (plug_end, plug_size, current_gen); } else { if (plug_size > 0) { make_unused_array (plug_end, plug_size); if (plug_size >= min_free_list) { youngest_free_list_space += plug_size; youngest_free_list.thread_item (plug_end, plug_size); } else { youngest_free_obj_space += plug_size; } } } fix_brick_to_highest (plug_end, plug_start); fix_brick_to_highest (plug_start, plug_start); BOOL m = TRUE; while (m) { o = o + Align (size (o), align_const); if (o >= end) { break; } m = background_object_marked (o, TRUE); } plug_end = o; dprintf (3, ("bgs: plug [%Ix, %Ix[", (size_t)plug_start, (size_t)plug_end)); } else { while ((o < end) && !background_object_marked (o, FALSE)) { o = o + Align (size (o), align_const); } } } if (plug_end != end) { if (i >= 1) { thread_gap (plug_end, end - plug_end, current_gen); } else { #ifndef USE_REGIONS heap_segment_allocated (ephemeral_heap_segment) = plug_end; heap_segment_saved_bg_allocated (ephemeral_heap_segment) = plug_end; #endif //!USE_REGIONS make_unused_array (plug_end, (end - plug_end)); } fix_brick_to_highest (plug_end, end); } #ifdef USE_REGIONS ephemeral_region->flags |= heap_segment_flags_swept; // Setting this to 0 so background_sweep can terminate for SOH. heap_segment_background_allocated (ephemeral_region) = 0; ephemeral_region = heap_segment_next (ephemeral_region); #endif //USE_REGIONS } dd_fragmentation (dynamic_data_of (i)) = generation_free_list_space (current_gen) + generation_free_obj_space (current_gen); } generation* youngest_gen = generation_of (0); generation_free_list_space (youngest_gen) = youngest_free_list_space; generation_free_obj_space (youngest_gen) = youngest_free_obj_space; dd_fragmentation (dynamic_data_of (0)) = youngest_free_list_space + youngest_free_obj_space; generation_allocator (youngest_gen)->copy_with_no_repair (&youngest_free_list); } void gc_heap::background_sweep() { //concurrent_print_time_delta ("finished with mark and start with sweep"); concurrent_print_time_delta ("Sw"); dprintf (2, ("---- (GC%d)Background Sweep Phase ----", VolatileLoad(&settings.gc_index))); //block concurrent allocation for large objects dprintf (3, ("lh state: planning")); for (int i = 0; i <= max_generation; i++) { generation* gen_to_reset = generation_of (i); #ifdef DOUBLY_LINKED_FL if (i == max_generation) { dprintf (2, ("h%d: gen2 still has FL: %Id, FO: %Id", heap_number, generation_free_list_space (gen_to_reset), generation_free_obj_space (gen_to_reset))); } else #endif //DOUBLY_LINKED_FL { generation_allocator (gen_to_reset)->clear(); generation_free_list_space (gen_to_reset) = 0; generation_free_obj_space (gen_to_reset) = 0; } generation_free_list_allocated (gen_to_reset) = 0; generation_end_seg_allocated (gen_to_reset) = 0; generation_condemned_allocated (gen_to_reset) = 0; generation_sweep_allocated (gen_to_reset) = 0; //reset the allocation so foreground gc can allocate into older generation generation_allocation_pointer (gen_to_reset)= 0; generation_allocation_limit (gen_to_reset) = 0; generation_allocation_segment (gen_to_reset) = heap_segment_rw (generation_start_segment (gen_to_reset)); } FIRE_EVENT(BGC2ndNonConEnd); uoh_alloc_thread_count = 0; init_free_and_plug(); 
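    // At this point every SOH generation's free list has been cleared (except gen2's when
    // DOUBLY_LINKED_FL maintains it across the sweep) and the generation allocation
    // pointers/limits were reset, so foreground GCs can allocate into the older generations
    // while the sweep below rebuilds the free lists.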
current_bgc_state = bgc_sweep_soh; verify_soh_segment_list(); #ifdef DOUBLY_LINKED_FL // set the initial segment and position so that foreground GC knows where BGC is with the sweep current_sweep_seg = heap_segment_rw (generation_start_segment (generation_of (max_generation))); current_sweep_pos = 0; #endif //DOUBLY_LINKED_FL #ifdef FEATURE_BASICFREEZE generation* max_gen = generation_of (max_generation); if ((generation_start_segment (max_gen) != ephemeral_heap_segment) && ro_segments_in_range) { sweep_ro_segments (generation_start_segment (max_gen)); } #endif // FEATURE_BASICFREEZE if (current_c_gc_state != c_gc_state_planning) { current_c_gc_state = c_gc_state_planning; } concurrent_print_time_delta ("Swe"); for (int i = uoh_start_generation; i < total_generation_count; i++) { heap_segment* uoh_seg = heap_segment_rw (generation_start_segment (generation_of (i))); PREFIX_ASSUME(uoh_seg != NULL); while (uoh_seg) { uoh_seg->flags &= ~heap_segment_flags_swept; heap_segment_background_allocated (uoh_seg) = heap_segment_allocated (uoh_seg); uoh_seg = heap_segment_next_rw (uoh_seg); } } #ifdef MULTIPLE_HEAPS bgc_t_join.join(this, gc_join_restart_ee); if (bgc_t_join.joined()) { dprintf(2, ("Starting BGC threads for resuming EE")); bgc_t_join.restart(); } #endif //MULTIPLE_HEAPS if (heap_number == 0) { #ifdef BGC_SERVO_TUNING get_and_reset_loh_alloc_info(); #endif //BGC_SERVO_TUNING uint64_t suspended_end_ts = GetHighPrecisionTimeStamp(); last_bgc_info[last_bgc_info_index].pause_durations[1] = (size_t)(suspended_end_ts - suspended_start_time); total_suspended_time += last_bgc_info[last_bgc_info_index].pause_durations[1]; restart_EE (); } FIRE_EVENT(BGC2ndConBegin); background_ephemeral_sweep(); concurrent_print_time_delta ("Swe eph"); #ifdef MULTIPLE_HEAPS bgc_t_join.join(this, gc_join_after_ephemeral_sweep); if (bgc_t_join.joined()) #endif //MULTIPLE_HEAPS { #ifdef FEATURE_EVENT_TRACE bgc_heap_walk_for_etw_p = GCEventStatus::IsEnabled(GCEventProvider_Default, GCEventKeyword_GCHeapSurvivalAndMovement, GCEventLevel_Information); #endif //FEATURE_EVENT_TRACE leave_spin_lock (&gc_lock); #ifdef MULTIPLE_HEAPS dprintf(2, ("Starting BGC threads for BGC sweeping")); bgc_t_join.restart(); #endif //MULTIPLE_HEAPS } disable_preemptive (true); dynamic_data* dd = dynamic_data_of (max_generation); const int num_objs = 256; int current_num_objs = 0; for (int i = max_generation; i < total_generation_count; i++) { generation* gen = generation_of (i); heap_segment* gen_start_seg = heap_segment_rw (generation_start_segment(gen)); heap_segment* next_seg = 0; heap_segment* prev_seg; heap_segment* start_seg; int align_const = get_alignment_constant (i == max_generation); #ifndef DOUBLY_LINKED_FL if (i == max_generation) { #ifdef USE_REGIONS start_seg = generation_tail_region (gen); #else // start with saved ephemeral segment // we are no longer holding gc_lock, so a new ephemeral segment could be added, we want the saved one. start_seg = saved_sweep_ephemeral_seg; #endif //USE_REGIONS prev_seg = heap_segment_next(start_seg); } else #endif //!DOUBLY_LINKED_FL { // If we use doubly linked FL we don't need to go backwards as we are maintaining the free list. 
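            // This path walks segments front to back: it is always used for the UOH
            // generations, and for SOH as well when DOUBLY_LINKED_FL keeps gen2's free
            // list valid while the sweep is in progress.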
start_seg = gen_start_seg; prev_seg = NULL; if (i > max_generation) { // UOH allocations are allowed while sweeping SOH, so // we defer clearing UOH free lists until we start sweeping them generation_allocator (gen)->clear(); generation_free_list_space (gen) = 0; generation_free_obj_space (gen) = 0; generation_free_list_allocated (gen) = 0; generation_end_seg_allocated (gen) = 0; generation_condemned_allocated (gen) = 0; generation_sweep_allocated (gen) = 0; generation_allocation_pointer (gen)= 0; generation_allocation_limit (gen) = 0; generation_allocation_segment (gen) = heap_segment_rw (generation_start_segment (gen)); } else { dprintf (3333, ("h%d: SOH sweep start on seg %Ix: total FL: %Id, FO: %Id", heap_number, (size_t)start_seg, generation_free_list_space (gen), generation_free_obj_space (gen))); } } PREFIX_ASSUME(start_seg != NULL); heap_segment* seg = start_seg; dprintf (2, ("bgs: sweeping gen %Ix seg %Ix->%Ix(%Ix)", gen->gen_num, heap_segment_mem (seg), heap_segment_allocated (seg), heap_segment_background_allocated (seg))); while (seg #ifdef DOUBLY_LINKED_FL // We no longer go backwards in segment list for SOH so we need to bail when we see // segments newly allocated during bgc sweep. && !((heap_segment_background_allocated (seg) == 0) && (gen != large_object_generation)) #endif //DOUBLY_LINKED_FL ) { uint8_t* o = heap_segment_mem (seg); if (seg == gen_start_seg) { #ifndef USE_REGIONS assert (o == generation_allocation_start (gen)); assert (method_table (o) == g_gc_pFreeObjectMethodTable); o = o + Align (size (o), align_const); #endif //!USE_REGIONS } uint8_t* plug_end = o; current_sweep_pos = o; next_sweep_obj = o; #ifdef DOUBLY_LINKED_FL current_sweep_seg = seg; #endif //DOUBLY_LINKED_FL // This records the total size of free objects (including the ones on and not on FL) // in the gap and it gets set to 0 when we encounter a plug. If the last gap we saw // on a seg is unmarked, we will process this in process_background_segment_end. size_t free_obj_size_last_gap = 0; allow_fgc(); uint8_t* end = background_next_end (seg, (i > max_generation)); dprintf (3333, ("bgs: seg: %Ix, [%Ix, %Ix[%Ix", (size_t)seg, (size_t)heap_segment_mem (seg), (size_t)heap_segment_allocated (seg), (size_t)heap_segment_background_allocated (seg))); while (o < end) { if (background_object_marked (o, TRUE)) { uint8_t* plug_start = o; if (i > max_generation) { dprintf (2, ("uoh fr: [%Ix-%Ix[(%Id)", plug_end, plug_start, plug_start-plug_end)); } thread_gap (plug_end, plug_start-plug_end, gen); if (i == max_generation) { add_gen_free (max_generation, plug_start-plug_end); #ifdef DOUBLY_LINKED_FL if (free_obj_size_last_gap) { generation_free_obj_space (gen) -= free_obj_size_last_gap; dprintf (3333, ("[h%d] LG: gen2FO-: %Id->%Id", heap_number, free_obj_size_last_gap, generation_free_obj_space (gen))); free_obj_size_last_gap = 0; } #endif //DOUBLY_LINKED_FL fix_brick_to_highest (plug_end, plug_start); // we need to fix the brick for the next plug here 'cause an FGC can // happen and can't read a stale brick. 
fix_brick_to_highest (plug_start, plug_start); } do { next_sweep_obj = o + Align (size (o), align_const); current_num_objs++; if (current_num_objs >= num_objs) { current_sweep_pos = next_sweep_obj; allow_fgc(); current_num_objs = 0; } o = next_sweep_obj; } while ((o < end) && background_object_marked(o, TRUE)); plug_end = o; if (i == max_generation) { add_gen_plug (max_generation, plug_end-plug_start); dd_survived_size (dd) += (plug_end - plug_start); } dprintf (3, ("bgs: plug [%Ix, %Ix[", (size_t)plug_start, (size_t)plug_end)); } while ((o < end) && !background_object_marked (o, FALSE)) { size_t size_o = Align(size (o), align_const); next_sweep_obj = o + size_o; #ifdef DOUBLY_LINKED_FL if (gen != large_object_generation) { if (method_table (o) == g_gc_pFreeObjectMethodTable) { free_obj_size_last_gap += size_o; if (is_on_free_list (o, size_o)) { generation_allocator (gen)->unlink_item_no_undo (o, size_o); generation_free_list_space (gen) -= size_o; assert ((ptrdiff_t)generation_free_list_space (gen) >= 0); generation_free_obj_space (gen) += size_o; dprintf (3333, ("[h%d] gen2F-: %Ix->%Ix(%Id) FL: %Id", heap_number, o, (o + size_o), size_o, generation_free_list_space (gen))); dprintf (3333, ("h%d: gen2FO+: %Ix(%Id)->%Id (g: %Id)", heap_number, o, size_o, generation_free_obj_space (gen), free_obj_size_last_gap)); remove_gen_free (max_generation, size_o); } else { // this was not on the free list so it was already part of // free_obj_space, so no need to substract from it. However, // we do need to keep track in this gap's FO space. dprintf (3333, ("h%d: gen2FO: %Ix(%Id)->%Id (g: %Id)", heap_number, o, size_o, generation_free_obj_space (gen), free_obj_size_last_gap)); } dprintf (3333, ("h%d: total FO: %Ix->%Ix FL: %Id, FO: %Id (g: %Id)", heap_number, plug_end, next_sweep_obj, generation_free_list_space (gen), generation_free_obj_space (gen), free_obj_size_last_gap)); } } #endif //DOUBLY_LINKED_FL current_num_objs++; if (current_num_objs >= num_objs) { current_sweep_pos = plug_end; dprintf (1234, ("f: swept till %Ix", current_sweep_pos)); allow_fgc(); current_num_objs = 0; } o = next_sweep_obj; } } #ifdef DOUBLY_LINKED_FL next_seg = heap_segment_next (seg); #else //DOUBLY_LINKED_FL if (i > max_generation) { next_seg = heap_segment_next (seg); } else { // For SOH segments we go backwards. next_seg = heap_segment_prev (gen_start_seg, seg); } #endif //DOUBLY_LINKED_FL BOOL delete_p = FALSE; if (!heap_segment_read_only_p (seg)) { if (i > max_generation) { // we can treat all UOH segments as in the bgc domain // regardless of whether we saw in bgc mark or not // because we don't allow UOH allocations during bgc // sweep anyway - the UOH segments can't change. 
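// process_background_segment_end deals with the unswept tail of the segment (everything
// past the last plug) and sets delete_p when the whole segment turned out to be free.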
process_background_segment_end (seg, gen, plug_end, start_seg, &delete_p, 0); } else { assert (heap_segment_background_allocated (seg) != 0); process_background_segment_end (seg, gen, plug_end, start_seg, &delete_p, free_obj_size_last_gap); #ifndef USE_REGIONS assert (next_seg || !delete_p); #endif //!USE_REGIONS } } heap_segment* saved_prev_seg = prev_seg; if (delete_p) { generation_delete_heap_segment (gen, seg, prev_seg, next_seg); } else { prev_seg = seg; dprintf (2, ("seg %Ix (%Ix) has been swept", seg, heap_segment_mem (seg))); seg->flags |= heap_segment_flags_swept; current_sweep_pos = end; } verify_soh_segment_list(); #ifdef DOUBLY_LINKED_FL while (next_seg && heap_segment_background_allocated (next_seg) == 0) { dprintf (2, ("[h%d] skip new %Ix ", heap_number, next_seg)); next_seg = heap_segment_next (next_seg); } #endif //DOUBLY_LINKED_FL dprintf (GTC_LOG, ("seg: %Ix(%Ix), next_seg: %Ix(%Ix), prev_seg: %Ix(%Ix), delete_p %d", seg, (seg ? heap_segment_mem (seg) : 0), next_seg, (next_seg ? heap_segment_mem (next_seg) : 0), saved_prev_seg, (saved_prev_seg ? heap_segment_mem (saved_prev_seg) : 0), (delete_p ? 1 : 0))); seg = next_seg; } generation_allocation_segment (gen) = heap_segment_rw (generation_start_segment (gen)); PREFIX_ASSUME(generation_allocation_segment(gen) != NULL); if (i == max_generation) { dprintf (2, ("bgs: sweeping uoh objects")); concurrent_print_time_delta ("Swe SOH"); FIRE_EVENT(BGC1stSweepEnd, 0); enter_spin_lock (&more_space_lock_uoh); add_saved_spinlock_info (true, me_acquire, mt_bgc_uoh_sweep); concurrent_print_time_delta ("Swe UOH took msl"); // We wait till all allocating threads are completely done. int spin_count = yp_spin_count_unit; while (uoh_alloc_thread_count) { spin_and_switch (spin_count, (uoh_alloc_thread_count == 0)); } current_bgc_state = bgc_sweep_uoh; } } size_t total_soh_size = generation_sizes (generation_of (max_generation)); size_t total_loh_size = generation_size (loh_generation); size_t total_poh_size = generation_size (poh_generation); dprintf (GTC_LOG, ("h%d: S: poh: %Id, loh: %Id, soh: %Id", heap_number, total_poh_size, total_loh_size, total_soh_size)); dprintf (GTC_LOG, ("end of bgc sweep: gen2 FL: %Id, FO: %Id", generation_free_list_space (generation_of (max_generation)), generation_free_obj_space (generation_of (max_generation)))); dprintf (GTC_LOG, ("h%d: end of bgc sweep: loh FL: %Id, FO: %Id", heap_number, generation_free_list_space (generation_of (loh_generation)), generation_free_obj_space (generation_of (loh_generation)))); dprintf (GTC_LOG, ("h%d: end of bgc sweep: poh FL: %Id, FO: %Id", heap_number, generation_free_list_space (generation_of (poh_generation)), generation_free_obj_space (generation_of (poh_generation)))); FIRE_EVENT(BGC2ndConEnd); concurrent_print_time_delta ("background sweep"); heap_segment* reset_seg = heap_segment_rw (generation_start_segment (generation_of (max_generation))); PREFIX_ASSUME(reset_seg != NULL); while (reset_seg) { heap_segment_saved_bg_allocated (reset_seg) = heap_segment_background_allocated (reset_seg); heap_segment_background_allocated (reset_seg) = 0; reset_seg = heap_segment_next_rw (reset_seg); } // We calculate dynamic data here because if we wait till we signal the lh event, // the allocation thread can change the fragmentation and we may read an intermediate // value (which can be greater than the generation size). Plus by that time it won't // be accurate. 
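// compute_new_dynamic_data below folds the just-swept gen2 size and fragmentation into
// the dynamic data that drives the next allocation budget.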
compute_new_dynamic_data (max_generation); #ifdef DOUBLY_LINKED_FL current_bgc_state = bgc_not_in_process; // We can have an FGC triggered before we set the global state to free // so we need to not have left over current_sweep_seg that point to // a segment that might've been deleted at the beginning of an FGC. current_sweep_seg = 0; #endif //DOUBLY_LINKED_FL enable_preemptive (); #ifdef MULTIPLE_HEAPS bgc_t_join.join(this, gc_join_set_state_free); if (bgc_t_join.joined()) #endif //MULTIPLE_HEAPS { // TODO: We are using this join just to set the state. Should // look into eliminating it - check to make sure things that use // this state can live with per heap state like should_check_bgc_mark. current_c_gc_state = c_gc_state_free; #ifdef BGC_SERVO_TUNING if (bgc_tuning::enable_fl_tuning) { enter_spin_lock (&gc_lock); bgc_tuning::record_and_adjust_bgc_end(); leave_spin_lock (&gc_lock); } #endif //BGC_SERVO_TUNING #ifdef MULTIPLE_HEAPS dprintf(2, ("Starting BGC threads after background sweep phase")); bgc_t_join.restart(); #endif //MULTIPLE_HEAPS } disable_preemptive (true); add_saved_spinlock_info (true, me_release, mt_bgc_uoh_sweep); leave_spin_lock (&more_space_lock_uoh); //dprintf (GTC_LOG, ("---- (GC%d)End Background Sweep Phase ----", VolatileLoad(&settings.gc_index))); dprintf (GTC_LOG, ("---- (GC%d)ESw ----", VolatileLoad(&settings.gc_index))); } #endif //BACKGROUND_GC void gc_heap::sweep_uoh_objects (int gen_num) { //this min value is for the sake of the dynamic tuning. //so we know that we are not starting even if we have no //survivors. generation* gen = generation_of (gen_num); heap_segment* start_seg = heap_segment_rw (generation_start_segment (gen)); PREFIX_ASSUME(start_seg != NULL); heap_segment* seg = start_seg; heap_segment* prev_seg = 0; uint8_t* o = get_uoh_start_object (seg, gen); uint8_t* plug_end = o; uint8_t* plug_start = o; generation_allocator (gen)->clear(); generation_free_list_space (gen) = 0; generation_free_obj_space (gen) = 0; generation_free_list_allocated (gen) = 0; dprintf (3, ("sweeping uoh objects")); dprintf (3, ("seg: %Ix, [%Ix, %Ix[, starting from %Ix", (size_t)seg, (size_t)heap_segment_mem (seg), (size_t)heap_segment_allocated (seg), o)); while (1) { if (o >= heap_segment_allocated (seg)) { heap_segment* next_seg = heap_segment_next (seg); //delete the empty segment if not the only one // REGIONS TODO: for regions we can get rid of the start_seg. Just need // to update start region accordingly. 
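// A segment is queued for deletion only when the sweep found nothing live on it (plug_end
// never moved past the segment start), it is not the generation's start segment, and it
// is writable; otherwise it is just trimmed to the last plug and kept.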
if ((plug_end == heap_segment_mem (seg)) && (seg != start_seg) && !heap_segment_read_only_p (seg)) { //prepare for deletion dprintf (3, ("Preparing empty large segment %Ix", (size_t)seg)); assert (prev_seg); heap_segment_next (prev_seg) = next_seg; heap_segment_next (seg) = freeable_uoh_segment; freeable_uoh_segment = seg; #ifdef USE_REGIONS update_start_tail_regions (gen, seg, prev_seg, next_seg); #endif //USE_REGIONS } else { if (!heap_segment_read_only_p (seg)) { dprintf (3, ("Trimming seg to %Ix[", (size_t)plug_end)); heap_segment_allocated (seg) = plug_end; decommit_heap_segment_pages (seg, 0); } prev_seg = seg; } seg = next_seg; if (seg == 0) break; else { o = heap_segment_mem (seg); plug_end = o; dprintf (3, ("seg: %Ix, [%Ix, %Ix[", (size_t)seg, (size_t)heap_segment_mem (seg), (size_t)heap_segment_allocated (seg))); #ifdef USE_REGIONS continue; #endif //USE_REGIONS } } if (uoh_object_marked(o, TRUE)) { plug_start = o; //everything between plug_end and plug_start is free thread_gap (plug_end, plug_start-plug_end, gen); BOOL m = TRUE; while (m) { o = o + AlignQword (size (o)); if (o >= heap_segment_allocated (seg)) { break; } m = uoh_object_marked (o, TRUE); } plug_end = o; dprintf (3, ("plug [%Ix, %Ix[", (size_t)plug_start, (size_t)plug_end)); } else { while (o < heap_segment_allocated (seg) && !uoh_object_marked(o, FALSE)) { o = o + AlignQword (size (o)); } } } generation_allocation_segment (gen) = heap_segment_rw (generation_start_segment (gen)); PREFIX_ASSUME(generation_allocation_segment(gen) != NULL); } void gc_heap::relocate_in_uoh_objects (int gen_num) { generation* gen = generation_of (gen_num); heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); PREFIX_ASSUME(seg != NULL); uint8_t* o = get_uoh_start_object (seg, gen); while (1) { if (o >= heap_segment_allocated (seg)) { seg = heap_segment_next_rw (seg); if (seg == 0) break; else { o = heap_segment_mem (seg); } } while (o < heap_segment_allocated (seg)) { check_class_object_demotion (o); if (contain_pointers (o)) { dprintf(3, ("Relocating through uoh object %Ix", (size_t)o)); go_through_object_nostart (method_table (o), o, size(o), pval, { reloc_survivor_helper (pval); }); } o = o + AlignQword (size (o)); } } } void gc_heap::mark_through_cards_for_uoh_objects (card_fn fn, int gen_num, BOOL relocating CARD_MARKING_STEALING_ARG(gc_heap* hpt)) { #ifdef USE_REGIONS uint8_t* low = 0; #else uint8_t* low = gc_low; #endif //USE_REGIONS size_t end_card = 0; generation* oldest_gen = generation_of (gen_num); heap_segment* seg = heap_segment_rw (generation_start_segment (oldest_gen)); PREFIX_ASSUME(seg != NULL); uint8_t* beg = get_uoh_start_object (seg, oldest_gen); uint8_t* end = heap_segment_allocated (seg); size_t cg_pointers_found = 0; size_t card_word_end = (card_of (align_on_card_word (end)) / card_word_width); size_t n_eph = 0; size_t n_gen = 0; size_t n_card_set = 0; #ifdef USE_REGIONS uint8_t* next_boundary = 0; uint8_t* nhigh = 0; #else uint8_t* next_boundary = (relocating ? generation_plan_allocation_start (generation_of (max_generation -1)) : ephemeral_low); uint8_t* nhigh = (relocating ? 
heap_segment_plan_allocated (ephemeral_heap_segment) : ephemeral_high); #endif //USE_REGIONS BOOL foundp = FALSE; uint8_t* start_address = 0; uint8_t* limit = 0; size_t card = card_of (beg); uint8_t* o = beg; #ifdef BACKGROUND_GC BOOL consider_bgc_mark_p = FALSE; BOOL check_current_sweep_p = FALSE; BOOL check_saved_sweep_p = FALSE; should_check_bgc_mark (seg, &consider_bgc_mark_p, &check_current_sweep_p, &check_saved_sweep_p); #endif //BACKGROUND_GC size_t total_cards_cleared = 0; #ifdef FEATURE_CARD_MARKING_STEALING VOLATILE(uint32_t)* chunk_index = (VOLATILE(uint32_t)*) &(gen_num == loh_generation ? card_mark_chunk_index_loh : card_mark_chunk_index_poh); card_marking_enumerator card_mark_enumerator(seg, low, chunk_index); card_word_end = 0; #endif // FEATURE_CARD_MARKING_STEALING #ifdef USE_REGIONS int condemned_gen = settings.condemned_generation; #else int condemned_gen = -1; #endif //USE_REGIONS //dprintf(3,( "scanning large objects from %Ix to %Ix", (size_t)beg, (size_t)end)); dprintf(3, ("CMl: %Ix->%Ix", (size_t)beg, (size_t)end)); while (1) { if ((o < end) && (card_of(o) > card)) { dprintf (3, ("Found %Id cg pointers", cg_pointers_found)); if (cg_pointers_found == 0) { uint8_t* last_object_processed = o; #ifdef FEATURE_CARD_MARKING_STEALING last_object_processed = min(limit, o); #endif // FEATURE_CARD_MARKING_STEALING dprintf (3, (" Clearing cards [%Ix, %Ix[ ", (size_t)card_address(card), (size_t)last_object_processed)); clear_cards (card, card_of((uint8_t*)last_object_processed)); total_cards_cleared += (card_of((uint8_t*)last_object_processed) - card); } n_eph +=cg_pointers_found; cg_pointers_found = 0; card = card_of ((uint8_t*)o); } if ((o < end) &&(card >= end_card)) { #ifdef FEATURE_CARD_MARKING_STEALING // find another chunk with some cards set foundp = find_next_chunk(card_mark_enumerator, seg, n_card_set, start_address, limit, card, end_card, card_word_end); #else // FEATURE_CARD_MARKING_STEALING foundp = find_card (card_table, card, card_word_end, end_card); if (foundp) { n_card_set+= end_card - card; start_address = max (beg, card_address (card)); } limit = min (end, card_address (end_card)); #endif // FEATURE_CARD_MARKING_STEALING } if ((!foundp) || (o >= end) || (card_address (card) >= end)) { if ((foundp) && (cg_pointers_found == 0)) { dprintf(3,(" Clearing cards [%Ix, %Ix[ ", (size_t)card_address(card), (size_t)card_address(card+1))); clear_cards (card, card+1); total_cards_cleared += 1; } n_eph +=cg_pointers_found; cg_pointers_found = 0; #ifdef FEATURE_CARD_MARKING_STEALING // we have decided to move to the next segment - make sure we exhaust the chunk enumerator for this segment card_mark_enumerator.exhaust_segment(seg); #endif // FEATURE_CARD_MARKING_STEALING if ((seg = heap_segment_next_rw (seg)) != 0) { #ifdef BACKGROUND_GC should_check_bgc_mark (seg, &consider_bgc_mark_p, &check_current_sweep_p, &check_saved_sweep_p); #endif //BACKGROUND_GC beg = heap_segment_mem (seg); end = compute_next_end (seg, low); #ifdef FEATURE_CARD_MARKING_STEALING card_word_end = 0; #else // FEATURE_CARD_MARKING_STEALING card_word_end = card_of (align_on_card_word (end)) / card_word_width; #endif // FEATURE_CARD_MARKING_STEALING card = card_of (beg); o = beg; end_card = 0; continue; } else { break; } } assert (card_set_p (card)); { dprintf(3,("card %Ix: o: %Ix, l: %Ix[ ", card, (size_t)o, (size_t)limit)); assert (Align (size (o)) >= Align (min_obj_size)); size_t s = size (o); uint8_t* next_o = o + AlignQword (s); Prefetch (next_o); while (o < limit) { s = size (o); assert (Align (s) 
>= Align (min_obj_size)); next_o = o + AlignQword (s); Prefetch (next_o); dprintf (4, ("|%Ix|", (size_t)o)); if (next_o < start_address) { goto end_object; } #ifdef BACKGROUND_GC if (!fgc_should_consider_object (o, seg, consider_bgc_mark_p, check_current_sweep_p, check_saved_sweep_p)) { goto end_object; } #endif //BACKGROUND_GC #ifdef COLLECTIBLE_CLASS if (is_collectible(o)) { BOOL passed_end_card_p = FALSE; if (card_of (o) > card) { passed_end_card_p = card_transition (o, end, card_word_end, cg_pointers_found, n_eph, n_card_set, card, end_card, foundp, start_address, limit, total_cards_cleared CARD_MARKING_STEALING_ARGS(card_mark_enumerator, seg, card_word_end)); } if ((!passed_end_card_p || foundp) && (card_of (o) == card)) { // card is valid and it covers the head of the object if (fn == &gc_heap::relocate_address) { cg_pointers_found++; } else { uint8_t* class_obj = get_class_object (o); mark_through_cards_helper (&class_obj, n_gen, cg_pointers_found, fn, nhigh, next_boundary, condemned_gen, max_generation CARD_MARKING_STEALING_ARG(hpt)); } } if (passed_end_card_p) { if (foundp && (card_address (card) < next_o)) { goto go_through_refs; } else { goto end_object; } } } go_through_refs: #endif //COLLECTIBLE_CLASS if (contain_pointers (o)) { dprintf(3,("Going through %Ix", (size_t)o)); go_through_object (method_table(o), o, s, poo, start_address, use_start, (o + s), { if (card_of ((uint8_t*)poo) > card) { BOOL passed_end_card_p = card_transition ((uint8_t*)poo, end, card_word_end, cg_pointers_found, n_eph, n_card_set, card, end_card, foundp, start_address, limit, total_cards_cleared CARD_MARKING_STEALING_ARGS(card_mark_enumerator, seg, card_word_end)); if (passed_end_card_p) { if (foundp && (card_address (card) < next_o)) { //new_start(); { if (ppstop <= (uint8_t**)start_address) {break;} else if (poo < (uint8_t**)start_address) {poo = (uint8_t**)start_address;} } } else { goto end_object; } } } mark_through_cards_helper (poo, n_gen, cg_pointers_found, fn, nhigh, next_boundary, condemned_gen, max_generation CARD_MARKING_STEALING_ARG(hpt)); } ); } end_object: o = next_o; } } } // compute the efficiency ratio of the card table if (!relocating) { #ifdef FEATURE_CARD_MARKING_STEALING Interlocked::ExchangeAddPtr(&n_eph_loh, n_eph); Interlocked::ExchangeAddPtr(&n_gen_loh, n_gen); dprintf (3, ("h%d marking h%d Mloh: cross: %Id, useful: %Id, cards set: %Id, cards cleared: %Id, ratio: %d", hpt->heap_number, heap_number, n_eph, n_gen, n_card_set, total_cards_cleared, (n_eph ? (int)(((float)n_gen / (float)n_eph) * 100) : 0))); dprintf (3, ("h%d marking h%d Mloh: total cross %Id, useful: %Id, running ratio: %d", hpt->heap_number, heap_number, (size_t)n_eph_loh, (size_t)n_gen_loh, (n_eph_loh ? (int)(((float)n_gen_loh / (float)n_eph_loh) * 100) : 0))); #else generation_skip_ratio = min (((n_eph > MIN_LOH_CROSS_GEN_REFS) ? 
(int)(((float)n_gen / (float)n_eph) * 100) : 100), generation_skip_ratio); dprintf (3, ("marking h%d Mloh: cross: %Id, useful: %Id, cards cleared: %Id, cards set: %Id, ratio: %d", heap_number, n_eph, n_gen, total_cards_cleared, n_card_set, generation_skip_ratio)); #endif //FEATURE_CARD_MARKING_STEALING } else { dprintf (3, ("R: Mloh: cross: %Id, useful: %Id, cards set: %Id, ratio: %d", n_eph, n_gen, n_card_set, generation_skip_ratio)); } } void gc_heap::descr_generations_to_profiler (gen_walk_fn fn, void *context) { #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { gc_heap* hp = g_heaps[i]; #else //MULTIPLE_HEAPS { gc_heap* hp = NULL; #ifdef _PREFAST_ // prefix complains about us dereferencing hp in wks build even though we only access static members // this way. not sure how to shut it up except for this ugly workaround: PREFIX_ASSUME(hp != NULL); #endif // _PREFAST_ #endif //MULTIPLE_HEAPS for (int curr_gen_number = total_generation_count-1; curr_gen_number >= 0; curr_gen_number--) { generation* gen = hp->generation_of (curr_gen_number); heap_segment* seg = generation_start_segment (gen); #ifdef USE_REGIONS while (seg) { fn(context, curr_gen_number, heap_segment_mem (seg), heap_segment_allocated (seg), heap_segment_reserved (seg)); seg = heap_segment_next (seg); } #else while (seg && (seg != hp->ephemeral_heap_segment)) { assert (curr_gen_number > 0); // report bounds from heap_segment_mem (seg) to // heap_segment_allocated (seg); // for generation # curr_gen_number // for heap # heap_no fn(context, curr_gen_number, heap_segment_mem (seg), heap_segment_allocated (seg), (curr_gen_number > max_generation) ? heap_segment_reserved (seg) : heap_segment_allocated (seg)); seg = heap_segment_next (seg); } if (seg) { assert (seg == hp->ephemeral_heap_segment); assert (curr_gen_number <= max_generation); if (curr_gen_number == max_generation) { if (heap_segment_mem (seg) < generation_allocation_start (hp->generation_of (max_generation-1))) { // report bounds from heap_segment_mem (seg) to // generation_allocation_start (generation_of (max_generation-1)) // for heap # heap_number fn(context, curr_gen_number, heap_segment_mem (seg), generation_allocation_start (hp->generation_of (max_generation-1)), generation_allocation_start (hp->generation_of (max_generation-1)) ); } } else if (curr_gen_number != 0) { //report bounds from generation_allocation_start (generation_of (curr_gen_number)) // to generation_allocation_start (generation_of (curr_gen_number-1)) // for heap # heap_number fn(context, curr_gen_number, generation_allocation_start (hp->generation_of (curr_gen_number)), generation_allocation_start (hp->generation_of (curr_gen_number-1)), generation_allocation_start (hp->generation_of (curr_gen_number-1))); } else { //report bounds from generation_allocation_start (generation_of (curr_gen_number)) // to heap_segment_allocated (ephemeral_heap_segment); // for heap # heap_number fn(context, curr_gen_number, generation_allocation_start (hp->generation_of (curr_gen_number)), heap_segment_allocated (hp->ephemeral_heap_segment), heap_segment_reserved (hp->ephemeral_heap_segment) ); } } #endif //USE_REGIONS } } } #ifdef TRACE_GC // Note that when logging is on it can take a long time to go through the free items. 
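// (The body of print_free_list below is currently commented out; presumably kept so the
// free-list walk can be re-enabled when chasing fragmentation issues.)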
void gc_heap::print_free_list (int gen, heap_segment* seg) { UNREFERENCED_PARAMETER(gen); UNREFERENCED_PARAMETER(seg); /* if (settings.concurrent == FALSE) { uint8_t* seg_start = heap_segment_mem (seg); uint8_t* seg_end = heap_segment_allocated (seg); dprintf (3, ("Free list in seg %Ix:", seg_start)); size_t total_free_item = 0; allocator* gen_allocator = generation_allocator (generation_of (gen)); for (unsigned int b = 0; b < gen_allocator->number_of_buckets(); b++) { uint8_t* fo = gen_allocator->alloc_list_head_of (b); while (fo) { if (fo >= seg_start && fo < seg_end) { total_free_item++; size_t free_item_len = size(fo); dprintf (3, ("[%Ix, %Ix[:%Id", (size_t)fo, (size_t)(fo + free_item_len), free_item_len)); } fo = free_list_slot (fo); } } dprintf (3, ("total %Id free items", total_free_item)); } */ } #endif //TRACE_GC void gc_heap::descr_generations (const char* msg) { #ifndef TRACE_GC UNREFERENCED_PARAMETER(msg); #endif //!TRACE_GC #ifdef STRESS_LOG if (StressLog::StressLogOn(LF_GC, LL_INFO10)) { gc_heap* hp = 0; #ifdef MULTIPLE_HEAPS hp= this; #endif //MULTIPLE_HEAPS STRESS_LOG1(LF_GC, LL_INFO10, "GC Heap %p\n", hp); for (int n = max_generation; n >= 0; --n) { #ifndef USE_REGIONS STRESS_LOG4(LF_GC, LL_INFO10, " Generation %d [%p, %p] cur = %p\n", n, generation_allocation_start(generation_of(n)), generation_allocation_limit(generation_of(n)), generation_allocation_pointer(generation_of(n))); #endif //USE_REGIONS heap_segment* seg = generation_start_segment(generation_of(n)); while (seg) { STRESS_LOG4(LF_GC, LL_INFO10, " Segment mem %p alloc = %p used %p committed %p\n", heap_segment_mem(seg), heap_segment_allocated(seg), heap_segment_used(seg), heap_segment_committed(seg)); seg = heap_segment_next(seg); } } } #endif // STRESS_LOG #ifdef TRACE_GC dprintf (2, ("lowest_address: %Ix highest_address: %Ix", (size_t) lowest_address, (size_t) highest_address)); #ifdef BACKGROUND_GC dprintf (2, ("bgc lowest_address: %Ix bgc highest_address: %Ix", (size_t) background_saved_lowest_address, (size_t) background_saved_highest_address)); #endif //BACKGROUND_GC if (heap_number == 0) { dprintf (1, ("total heap size: %Id, commit size: %Id", get_total_heap_size(), get_total_committed_size())); } for (int curr_gen_number = total_generation_count - 1; curr_gen_number >= 0; curr_gen_number--) { size_t total_gen_size = generation_size (curr_gen_number); #ifdef SIMPLE_DPRINTF dprintf (GTC_LOG, ("[%s][g%d]gen %d:, size: %Id, frag: %Id(L: %Id, O: %Id), f: %d%% %s %s %s", msg, settings.condemned_generation, curr_gen_number, total_gen_size, dd_fragmentation (dynamic_data_of (curr_gen_number)), generation_free_list_space (generation_of (curr_gen_number)), generation_free_obj_space (generation_of (curr_gen_number)), (total_gen_size ? (int)(((double)dd_fragmentation (dynamic_data_of (curr_gen_number)) / (double)total_gen_size) * 100) : 0), (settings.compaction ? "(compact)" : "(sweep)"), (settings.heap_expansion ? "(EX)" : " "), (settings.promotion ? 
"Promotion" : "NoPromotion"))); #else dprintf (2, ( "Generation %d: generation size: %Id, fragmentation: %Id", curr_gen_number, total_gen_size, dd_fragmentation (dynamic_data_of (curr_gen_number)))); #endif //SIMPLE_DPRINTF generation* gen = generation_of (curr_gen_number); heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); #ifdef USE_REGIONS dprintf (1, ("g%d: start seg: %Ix alloc seg: %Ix, tail region: %Ix", curr_gen_number, heap_segment_mem (seg), heap_segment_mem (generation_allocation_segment (gen)), heap_segment_mem (generation_tail_region (gen)))); while (seg) { dprintf (GTC_LOG, ("g%d: (%d:p %d) [%Ix %Ix(sa: %Ix, pa: %Ix)[-%Ix[ (%Id) (%Id)", curr_gen_number, heap_segment_gen_num (seg), heap_segment_plan_gen_num (seg), (size_t)heap_segment_mem (seg), (size_t)heap_segment_allocated (seg), (size_t)heap_segment_saved_allocated (seg), (size_t)heap_segment_plan_allocated (seg), (size_t)heap_segment_committed (seg), (size_t)(heap_segment_allocated (seg) - heap_segment_mem (seg)), (size_t)(heap_segment_committed (seg) - heap_segment_allocated (seg)))); print_free_list (curr_gen_number, seg); seg = heap_segment_next (seg); } #else while (seg && (seg != ephemeral_heap_segment)) { dprintf (GTC_LOG, ("g%d: [%Ix %Ix[-%Ix[ (%Id) (%Id)", curr_gen_number, (size_t)heap_segment_mem (seg), (size_t)heap_segment_allocated (seg), (size_t)heap_segment_committed (seg), (size_t)(heap_segment_allocated (seg) - heap_segment_mem (seg)), (size_t)(heap_segment_committed (seg) - heap_segment_allocated (seg)))); print_free_list (curr_gen_number, seg); seg = heap_segment_next (seg); } if (seg && (seg != generation_start_segment (gen))) { dprintf (GTC_LOG, ("g%d: [%Ix %Ix[", curr_gen_number, (size_t)heap_segment_mem (seg), (size_t)generation_allocation_start (generation_of (curr_gen_number-1)))); print_free_list (curr_gen_number, seg); } else if (seg) { dprintf (GTC_LOG, ("g%d: [%Ix %Ix[", curr_gen_number, (size_t)generation_allocation_start (generation_of (curr_gen_number)), (size_t)(((curr_gen_number == 0)) ? (heap_segment_allocated (generation_start_segment (generation_of (curr_gen_number)))) : (generation_allocation_start (generation_of (curr_gen_number - 1)))) )); print_free_list (curr_gen_number, seg); } #endif //USE_REGIONS } #endif //TRACE_GC } //----------------------------------------------------------------------------- // // VM Specific support // //----------------------------------------------------------------------------- //Static member variables. VOLATILE(BOOL) GCHeap::GcInProgress = FALSE; GCEvent *GCHeap::WaitForGCEvent = NULL; unsigned GCHeap::GcCondemnedGeneration = 0; size_t GCHeap::totalSurvivedSize = 0; #ifdef FEATURE_PREMORTEM_FINALIZATION CFinalize* GCHeap::m_Finalize = 0; BOOL GCHeap::GcCollectClasses = FALSE; VOLATILE(int32_t) GCHeap::m_GCFLock = 0; #ifndef FEATURE_REDHAWK // Redhawk forces relocation a different way #ifdef STRESS_HEAP #ifndef MULTIPLE_HEAPS OBJECTHANDLE GCHeap::m_StressObjs[NUM_HEAP_STRESS_OBJS]; int GCHeap::m_CurStressObj = 0; #endif // !MULTIPLE_HEAPS #endif // STRESS_HEAP #endif // FEATURE_REDHAWK #endif //FEATURE_PREMORTEM_FINALIZATION class NoGCRegionLockHolder { public: NoGCRegionLockHolder() { enter_spin_lock_noinstru(&g_no_gc_lock); } ~NoGCRegionLockHolder() { leave_spin_lock_noinstru(&g_no_gc_lock); } }; // An explanation of locking for finalization: // // Multiple threads allocate objects. During the allocation, they are serialized by // the AllocLock above. But they release that lock before they register the object // for finalization. 
That's because there is much contention for the alloc lock, but // finalization is presumed to be a rare case. // // So registering an object for finalization must be protected by the FinalizeLock. // // There is another logical queue that involves finalization. When objects registered // for finalization become unreachable, they are moved from the "registered" queue to // the "unreachable" queue. Note that this only happens inside a GC, so no other // threads can be manipulating either queue at that time. Once the GC is over and // threads are resumed, the Finalizer thread will dequeue objects from the "unreachable" // queue and call their finalizers. This dequeue operation is also protected with // the finalize lock. // // At first, this seems unnecessary. Only one thread is ever enqueuing or dequeuing // on the unreachable queue (either the GC thread during a GC or the finalizer thread // when a GC is not in progress). The reason we share a lock with threads enqueuing // on the "registered" queue is that the "registered" and "unreachable" queues are // interrelated. // // They are actually two regions of a longer list, which can only grow at one end. // So to enqueue an object to the "registered" list, you actually rotate an unreachable // object at the boundary between the logical queues, out to the other end of the // unreachable queue -- where all growing takes place. Then you move the boundary // pointer so that the gap we created at the boundary is now on the "registered" // side rather than the "unreachable" side. Now the object can be placed into the // "registered" side at that point. This is much more efficient than doing moves // of arbitrarily long regions, but it causes the two queues to require a shared lock. // // Notice that Enter/LeaveFinalizeLock is not a GC-aware spin lock. Instead, it relies // on the fact that the lock will only be taken for a brief period and that it will // never provoke or allow a GC while the lock is held. This is critical. If the // FinalizeLock used enter_spin_lock (and thus sometimes enters preemptive mode to // allow a GC), then the Alloc client would have to GC protect a finalizable object // to protect against that eventuality. That is too slow! BOOL IsValidObject99(uint8_t *pObject) { #ifdef VERIFY_HEAP if (!((CObjectHeader*)pObject)->IsFree()) ((CObjectHeader *) pObject)->Validate(); #endif //VERIFY_HEAP return(TRUE); } #ifdef BACKGROUND_GC BOOL gc_heap::bgc_mark_array_range (heap_segment* seg, BOOL whole_seg_p, uint8_t** range_beg, uint8_t** range_end) { uint8_t* seg_start = heap_segment_mem (seg); uint8_t* seg_end = (whole_seg_p ? 
heap_segment_reserved (seg) : align_on_mark_word (heap_segment_allocated (seg))); if ((seg_start < background_saved_highest_address) && (seg_end > background_saved_lowest_address)) { *range_beg = max (seg_start, background_saved_lowest_address); *range_end = min (seg_end, background_saved_highest_address); return TRUE; } else { return FALSE; } } void gc_heap::bgc_verify_mark_array_cleared (heap_segment* seg) { #ifdef VERIFY_HEAP if (gc_heap::background_running_p() && (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)) { uint8_t* range_beg = 0; uint8_t* range_end = 0; if (bgc_mark_array_range (seg, TRUE, &range_beg, &range_end)) { size_t markw = mark_word_of (range_beg); size_t markw_end = mark_word_of (range_end); while (markw < markw_end) { if (mark_array [markw]) { dprintf (1, ("The mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared", markw, mark_array [markw], mark_word_address (markw))); FATAL_GC_ERROR(); } markw++; } uint8_t* p = mark_word_address (markw_end); while (p < range_end) { assert (!(mark_array_marked (p))); p++; } } } #endif //VERIFY_HEAP } void gc_heap::verify_mark_bits_cleared (uint8_t* obj, size_t s) { #ifdef VERIFY_HEAP size_t start_mark_bit = mark_bit_of (obj) + 1; size_t end_mark_bit = mark_bit_of (obj + s); unsigned int startbit = mark_bit_bit (start_mark_bit); unsigned int endbit = mark_bit_bit (end_mark_bit); size_t startwrd = mark_bit_word (start_mark_bit); size_t endwrd = mark_bit_word (end_mark_bit); unsigned int result = 0; unsigned int firstwrd = ~(lowbits (~0, startbit)); unsigned int lastwrd = ~(highbits (~0, endbit)); if (startwrd == endwrd) { unsigned int wrd = firstwrd & lastwrd; result = mark_array[startwrd] & wrd; if (result) { FATAL_GC_ERROR(); } return; } // verify the first mark word is cleared. if (startbit) { result = mark_array[startwrd] & firstwrd; if (result) { FATAL_GC_ERROR(); } startwrd++; } for (size_t wrdtmp = startwrd; wrdtmp < endwrd; wrdtmp++) { result = mark_array[wrdtmp]; if (result) { FATAL_GC_ERROR(); } } // set the last mark word. 
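// (nothing is actually written here despite the comment above: the last, partial mark
// word is only checked to be clear, mirroring the first-word check earlier)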
if (endbit) { result = mark_array[endwrd] & lastwrd; if (result) { FATAL_GC_ERROR(); } } #endif //VERIFY_HEAP } void gc_heap::clear_all_mark_array() { for (int i = get_start_generation_index(); i < total_generation_count; i++) { generation* gen = generation_of (i); heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); while (seg) { uint8_t* range_beg = 0; uint8_t* range_end = 0; if (bgc_mark_array_range (seg, (seg == ephemeral_heap_segment), &range_beg, &range_end)) { size_t markw = mark_word_of (range_beg); size_t markw_end = mark_word_of (range_end); size_t size_total = (markw_end - markw) * sizeof (uint32_t); //num_dwords_written = markw_end - markw; size_t size = 0; size_t size_left = 0; assert (((size_t)&mark_array[markw] & (sizeof(PTR_PTR)-1)) == 0); if ((size_total & (sizeof(PTR_PTR) - 1)) != 0) { size = (size_total & ~(sizeof(PTR_PTR) - 1)); size_left = size_total - size; assert ((size_left & (sizeof (uint32_t) - 1)) == 0); } else { size = size_total; } memclr ((uint8_t*)&mark_array[markw], size); if (size_left != 0) { uint32_t* markw_to_clear = &mark_array[markw + size / sizeof (uint32_t)]; for (size_t i = 0; i < (size_left / sizeof (uint32_t)); i++) { *markw_to_clear = 0; markw_to_clear++; } } } seg = heap_segment_next_rw (seg); } } } void gc_heap::verify_mark_array_cleared() { #ifdef VERIFY_HEAP if (gc_heap::background_running_p() && (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)) { for (int i = get_start_generation_index(); i < total_generation_count; i++) { generation* gen = generation_of (i); heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); while (seg) { bgc_verify_mark_array_cleared (seg); seg = heap_segment_next_rw (seg); } } } #endif //VERIFY_HEAP } #endif //BACKGROUND_GC // This function is called to make sure we don't mess up the segment list // in SOH. It's called by: // 1) begin and end of ephemeral GCs // 2) during bgc sweep when we switch segments. void gc_heap::verify_soh_segment_list() { #ifdef VERIFY_HEAP if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) { for (int i = get_start_generation_index(); i <= max_generation; i++) { generation* gen = generation_of (i); heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); heap_segment* last_seg = 0; while (seg) { last_seg = seg; seg = heap_segment_next_rw (seg); } #ifdef USE_REGIONS if (last_seg != generation_tail_region (gen)) #else if (last_seg != ephemeral_heap_segment) #endif //USE_REGIONS { FATAL_GC_ERROR(); } } } #endif //VERIFY_HEAP } // This function can be called at any foreground GCs or blocking GCs. For background GCs, // it can be called at the end of the final marking; and at any point during background // sweep. // NOTE - to be able to call this function during background sweep, we need to temporarily // NOT clear the mark array bits as we go. #ifdef BACKGROUND_GC void gc_heap::verify_partial() { // Different ways to fail. 
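// Each flag below tracks one failure mode (a missed mark, a corrupt reference, or a
// reference to a free object); hitting any of them raises FATAL_GC_ERROR right away.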
BOOL mark_missed_p = FALSE; BOOL bad_ref_p = FALSE; BOOL free_ref_p = FALSE; for (int i = get_start_generation_index(); i < total_generation_count; i++) { generation* gen = generation_of (i); int align_const = get_alignment_constant (i == max_generation); heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); while (seg) { uint8_t* o = heap_segment_mem (seg); uint8_t* end = heap_segment_allocated (seg); while (o < end) { size_t s = size (o); BOOL marked_p = background_object_marked (o, FALSE); if (marked_p) { go_through_object_cl (method_table (o), o, s, oo, { if (*oo) { //dprintf (3, ("VOM: verifying member %Ix in obj %Ix", (size_t)*oo, o)); MethodTable *pMT = method_table (*oo); if (pMT == g_gc_pFreeObjectMethodTable) { free_ref_p = TRUE; FATAL_GC_ERROR(); } if (!pMT->SanityCheck()) { bad_ref_p = TRUE; dprintf (1, ("Bad member of %Ix %Ix", (size_t)oo, (size_t)*oo)); FATAL_GC_ERROR(); } if (current_bgc_state == bgc_final_marking) { if (marked_p && !background_object_marked (*oo, FALSE)) { mark_missed_p = TRUE; FATAL_GC_ERROR(); } } } } ); } o = o + Align(s, align_const); } seg = heap_segment_next_rw (seg); } } } #endif //BACKGROUND_GC #ifdef VERIFY_HEAP void gc_heap::verify_free_lists () { for (int gen_num = 0; gen_num < total_generation_count; gen_num++) { dprintf (3, ("Verifying free list for gen:%d", gen_num)); allocator* gen_alloc = generation_allocator (generation_of (gen_num)); size_t sz = gen_alloc->first_bucket_size(); bool verify_undo_slot = (gen_num != 0) && (gen_num <= max_generation) && !gen_alloc->discard_if_no_fit_p(); for (unsigned int a_l_number = 0; a_l_number < gen_alloc->number_of_buckets(); a_l_number++) { uint8_t* free_list = gen_alloc->alloc_list_head_of (a_l_number); uint8_t* prev = 0; while (free_list) { if (!((CObjectHeader*)free_list)->IsFree()) { dprintf (1, ("Verifiying Heap: curr free list item %Ix isn't a free object)", (size_t)free_list)); FATAL_GC_ERROR(); } if (((a_l_number < (gen_alloc->number_of_buckets()-1))&& (unused_array_size (free_list) >= sz)) || ((a_l_number != 0) && (unused_array_size (free_list) < sz/2))) { dprintf (1, ("Verifiying Heap: curr free list item %Ix isn't in the right bucket", (size_t)free_list)); FATAL_GC_ERROR(); } if (verify_undo_slot && (free_list_undo (free_list) != UNDO_EMPTY)) { dprintf (1, ("Verifiying Heap: curr free list item %Ix has non empty undo slot", (size_t)free_list)); FATAL_GC_ERROR(); } if ((gen_num <= max_generation) && (object_gennum (free_list)!= gen_num)) { dprintf (1, ("Verifiying Heap: curr free list item %Ix is in the wrong generation free list", (size_t)free_list)); FATAL_GC_ERROR(); } #ifdef DOUBLY_LINKED_FL uint8_t* prev_free_item = free_list_prev (free_list); if (gen_num == max_generation) { if (prev_free_item != prev) { dprintf (1, ("%Ix prev should be: %Ix, actual: %Ix", free_list, prev_free_item, prev)); FATAL_GC_ERROR(); } } #endif //DOUBLY_LINKED_FL prev = free_list; free_list = free_list_slot (free_list); } //verify the sanity of the tail uint8_t* tail = gen_alloc->alloc_list_tail_of (a_l_number); if (!((tail == 0) || (tail == prev))) { dprintf (1, ("Verifying Heap: tail of free list is not correct, tail %Ix, prev %Ix", tail, prev)); FATAL_GC_ERROR(); } if (tail == 0) { uint8_t* head = gen_alloc->alloc_list_head_of (a_l_number); if ((head != 0) && (free_list_slot (head) != 0)) { dprintf (1, ("Verifying Heap: head of free list is not correct, head %Ix -> %Ix", head, free_list_slot (head))); FATAL_GC_ERROR(); } } sz *=2; } } } void gc_heap::verify_regions (int gen_number, bool 
can_verify_gen_num, bool can_verify_tail) { #ifdef USE_REGIONS // For the given generation, verify that // // 1) it has at least one region. // 2) the tail region is the same as the last region if we following the list of regions // in that generation. // 3) no region is pointing to itself. // 4) if we can verify gen num, each region's gen_num and plan_gen_num are the same and // they are the right generation. generation* gen = generation_of (gen_number); int num_regions_in_gen = 0; heap_segment* seg_in_gen = heap_segment_rw (generation_start_segment (gen)); heap_segment* prev_region_in_gen = 0; heap_segment* tail_region = generation_tail_region (gen); while (seg_in_gen) { if (can_verify_gen_num) { if (heap_segment_gen_num (seg_in_gen) != min (gen_number, max_generation)) { dprintf (REGIONS_LOG, ("h%d gen%d region %Ix(%Ix) gen is %d!", heap_number, gen_number, seg_in_gen, heap_segment_mem (seg_in_gen), heap_segment_gen_num (seg_in_gen))); FATAL_GC_ERROR(); } if (heap_segment_gen_num (seg_in_gen) != heap_segment_plan_gen_num (seg_in_gen)) { dprintf (REGIONS_LOG, ("h%d gen%d region %Ix(%Ix) gen is %d but plan gen is %d!!", heap_number, gen_number, seg_in_gen, heap_segment_mem (seg_in_gen), heap_segment_gen_num (seg_in_gen), heap_segment_plan_gen_num (seg_in_gen))); FATAL_GC_ERROR(); } } if (heap_segment_allocated (seg_in_gen) > heap_segment_reserved (seg_in_gen)) { dprintf (REGIONS_LOG, ("h%d gen%d region %Ix alloc %Ix > reserved %Ix!!", heap_number, gen_number, heap_segment_mem (seg_in_gen), heap_segment_allocated (seg_in_gen), heap_segment_reserved (seg_in_gen))); FATAL_GC_ERROR(); } prev_region_in_gen = seg_in_gen; num_regions_in_gen++; heap_segment* next_region = heap_segment_next (seg_in_gen); if (seg_in_gen == next_region) { dprintf (REGIONS_LOG, ("h%d gen%d region %Ix(%Ix) pointing to itself!!", heap_number, gen_number, seg_in_gen, heap_segment_mem (seg_in_gen))); FATAL_GC_ERROR(); } seg_in_gen = next_region; } if (num_regions_in_gen == 0) { dprintf (REGIONS_LOG, ("h%d gen%d has no regions!!", heap_number, gen_number)); FATAL_GC_ERROR(); } if (can_verify_tail && (tail_region != prev_region_in_gen)) { dprintf (REGIONS_LOG, ("h%d gen%d tail region is %Ix(%Ix), diff from last region %Ix(%Ix)!!", heap_number, gen_number, tail_region, heap_segment_mem (tail_region), prev_region_in_gen, heap_segment_mem (prev_region_in_gen))); FATAL_GC_ERROR(); } #endif //USE_REGIONS } inline bool is_user_alloc_gen (int gen_number) { return ((gen_number == soh_gen0) || (gen_number == loh_generation) || (gen_number == poh_generation)); } void gc_heap::verify_regions (bool can_verify_gen_num, bool concurrent_p) { #ifdef USE_REGIONS for (int i = 0; i < total_generation_count; i++) { bool can_verify_tail = (concurrent_p ? 
!is_user_alloc_gen (i) : true); verify_regions (i, can_verify_gen_num, can_verify_tail); } #endif //USE_REGIONS } BOOL gc_heap::check_need_card (uint8_t* child_obj, int gen_num_for_cards, uint8_t* low, uint8_t* high) { #ifdef USE_REGIONS return (get_region_gen_num (child_obj) < gen_num_for_cards); #else return ((child_obj < high) && (child_obj >= low)); #endif //USE_REGIONS } void gc_heap::enter_gc_lock_for_verify_heap() { #ifdef VERIFY_HEAP if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) { enter_spin_lock (&gc_heap::gc_lock); dprintf (SPINLOCK_LOG, ("enter gc_lock for verify_heap")); } #endif // VERIFY_HEAP } void gc_heap::leave_gc_lock_for_verify_heap() { #ifdef VERIFY_HEAP if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) { dprintf (SPINLOCK_LOG, ("leave gc_lock taken for verify_heap")); leave_spin_lock (&gc_heap::gc_lock); } #endif // VERIFY_HEAP } void gc_heap::verify_heap (BOOL begin_gc_p) { int heap_verify_level = static_cast<int>(GCConfig::GetHeapVerifyLevel()); #ifdef MULTIPLE_HEAPS t_join* current_join = &gc_t_join; #ifdef BACKGROUND_GC if (settings.concurrent && (bgc_thread_id.IsCurrentThread())) { // We always call verify_heap on entry of GC on the SVR GC threads. current_join = &bgc_t_join; } #endif //BACKGROUND_GC #endif //MULTIPLE_HEAPS #ifndef TRACE_GC UNREFERENCED_PARAMETER(begin_gc_p); #endif //!TRACE_GC #ifdef BACKGROUND_GC dprintf (2,("[%s]GC#%d(%s): Verifying heap - begin", (begin_gc_p ? "BEG" : "END"), VolatileLoad(&settings.gc_index), (settings.concurrent ? "BGC" : (gc_heap::background_running_p() ? "FGC" : "NGC")))); #else dprintf (2,("[%s]GC#%d: Verifying heap - begin", (begin_gc_p ? "BEG" : "END"), VolatileLoad(&settings.gc_index))); #endif //BACKGROUND_GC #ifndef MULTIPLE_HEAPS #ifndef USE_REGIONS if ((ephemeral_low != generation_allocation_start (generation_of (max_generation - 1))) || (ephemeral_high != heap_segment_reserved (ephemeral_heap_segment))) { FATAL_GC_ERROR(); } #endif //!USE_REGIONS #endif //MULTIPLE_HEAPS #ifdef BACKGROUND_GC //don't touch the memory because the program is allocating from it. if (!settings.concurrent) #endif //BACKGROUND_GC { if (!(heap_verify_level & GCConfig::HEAPVERIFY_NO_MEM_FILL)) { // 0xaa the unused portions of segments. 
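// The 0xaa fill covers [heap_segment_allocated - plug_skew, heap_segment_used): committed
// memory that should hold no live objects, so stray references into it are easy to spot.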
for (int i = get_start_generation_index(); i < total_generation_count; i++) { generation* gen1 = generation_of (i); heap_segment* seg1 = heap_segment_rw (generation_start_segment (gen1)); while (seg1) { uint8_t* clear_start = heap_segment_allocated (seg1) - plug_skew; if (heap_segment_used (seg1) > clear_start) { dprintf (3, ("setting end of seg %Ix: [%Ix-[%Ix to 0xaa", heap_segment_mem (seg1), clear_start , heap_segment_used (seg1))); memset (heap_segment_allocated (seg1) - plug_skew, 0xaa, (heap_segment_used (seg1) - clear_start)); } seg1 = heap_segment_next_rw (seg1); } } } } #ifdef MULTIPLE_HEAPS current_join->join(this, gc_join_verify_copy_table); if (current_join->joined()) { // in concurrent GC, new segment could be allocated when GC is working so the card brick table might not be updated at this point for (int i = 0; i < n_heaps; i++) { //copy the card and brick tables if (g_gc_card_table != g_heaps[i]->card_table) { g_heaps[i]->copy_brick_card_table(); } } current_join->restart(); } #else if (g_gc_card_table != card_table) copy_brick_card_table(); #endif //MULTIPLE_HEAPS //verify that the generation structures makes sense { #ifdef USE_REGIONS verify_regions (true, settings.concurrent); #else //USE_REGIONS generation* gen = generation_of (max_generation); assert (generation_allocation_start (gen) == heap_segment_mem (heap_segment_rw (generation_start_segment (gen)))); int gen_num = max_generation-1; generation* prev_gen = gen; while (gen_num >= 0) { gen = generation_of (gen_num); assert (generation_allocation_segment (gen) == ephemeral_heap_segment); assert (generation_allocation_start (gen) >= heap_segment_mem (ephemeral_heap_segment)); assert (generation_allocation_start (gen) < heap_segment_allocated (ephemeral_heap_segment)); if (generation_start_segment (prev_gen ) == generation_start_segment (gen)) { assert (generation_allocation_start (prev_gen) < generation_allocation_start (gen)); } prev_gen = gen; gen_num--; } #endif //USE_REGIONS } size_t total_objects_verified = 0; size_t total_objects_verified_deep = 0; BOOL bCurrentBrickInvalid = FALSE; size_t last_valid_brick = 0; size_t curr_brick = 0; size_t prev_brick = (size_t)-1; int gen_num_for_cards = 0; #ifdef USE_REGIONS int gen_num_to_stop = 0; uint8_t* e_high = 0; uint8_t* next_boundary = 0; #else //USE_REGIONS // For no regions the gen number is seperately reduced when we detect the ephemeral seg. int gen_num_to_stop = max_generation; uint8_t* e_high = ephemeral_high; uint8_t* next_boundary = generation_allocation_start (generation_of (max_generation - 1)); uint8_t* begin_youngest = generation_allocation_start(generation_of(0)); #endif //!USE_REGIONS // go through all generations starting with the highest for (int curr_gen_num = total_generation_count - 1; curr_gen_num >= gen_num_to_stop; curr_gen_num--) { int align_const = get_alignment_constant (curr_gen_num == max_generation); BOOL large_brick_p = (curr_gen_num > max_generation); #ifdef USE_REGIONS gen_num_for_cards = ((curr_gen_num >= max_generation) ? 
max_generation : curr_gen_num); #endif //USE_REGIONS heap_segment* seg = heap_segment_in_range (generation_start_segment (generation_of (curr_gen_num) )); while (seg) { uint8_t* curr_object = heap_segment_mem (seg); uint8_t* prev_object = 0; #ifdef USE_REGIONS if (heap_segment_gen_num (seg) != heap_segment_plan_gen_num (seg)) { dprintf (1, ("Seg %Ix, gen num is %d, plan gen num is %d", heap_segment_mem (seg), heap_segment_gen_num (seg), heap_segment_plan_gen_num (seg))); FATAL_GC_ERROR(); } #endif //USE_REGIONS #ifdef BACKGROUND_GC BOOL consider_bgc_mark_p = FALSE; BOOL check_current_sweep_p = FALSE; BOOL check_saved_sweep_p = FALSE; should_check_bgc_mark (seg, &consider_bgc_mark_p, &check_current_sweep_p, &check_saved_sweep_p); #endif //BACKGROUND_GC while (curr_object < heap_segment_allocated (seg)) { if (is_mark_set (curr_object)) { dprintf (1, ("curr_object: %Ix is marked!",(size_t)curr_object)); FATAL_GC_ERROR(); } size_t s = size (curr_object); dprintf (3, ("o: %Ix, s: %d", (size_t)curr_object, s)); if (s == 0) { dprintf (1, ("Verifying Heap: size of current object %Ix == 0", curr_object)); FATAL_GC_ERROR(); } #ifndef USE_REGIONS // handle generation boundaries within ephemeral segment if (seg == ephemeral_heap_segment) { if ((curr_gen_num > 0) && (curr_object >= next_boundary)) { curr_gen_num--; if (curr_gen_num > 0) { next_boundary = generation_allocation_start (generation_of (curr_gen_num - 1)); } } } #endif //!USE_REGIONS #ifdef USE_REGIONS if (curr_gen_num != 0) #else // If object is not in the youngest generation, then lets // verify that the brick table is correct.... if (((seg != ephemeral_heap_segment) || (brick_of(curr_object) < brick_of(begin_youngest)))) #endif //USE_REGIONS { curr_brick = brick_of(curr_object); // Brick Table Verification... // // On brick transition // if brick is negative // verify that brick indirects to previous valid brick // else // set current brick invalid flag to be flipped if we // encounter an object at the correct place // if (curr_brick != prev_brick) { // If the last brick we were examining had positive // entry but we never found the matching object, then // we have a problem // If prev_brick was the last one of the segment // it's ok for it to be invalid because it is never looked at if (bCurrentBrickInvalid && (curr_brick != brick_of (heap_segment_mem (seg))) && !heap_segment_read_only_p (seg)) { dprintf (1, ("curr brick %Ix invalid", curr_brick)); FATAL_GC_ERROR(); } if (large_brick_p) { //large objects verify the table only if they are in //range. if ((heap_segment_reserved (seg) <= highest_address) && (heap_segment_mem (seg) >= lowest_address) && brick_table [curr_brick] != 0) { dprintf (1, ("curr_brick %Ix for large object %Ix is set to %Ix", curr_brick, (size_t)curr_object, (size_t)brick_table[curr_brick])); FATAL_GC_ERROR(); } else { bCurrentBrickInvalid = FALSE; } } else { // If the current brick contains a negative value make sure // that the indirection terminates at the last valid brick if (brick_table [curr_brick] <= 0) { if (brick_table [curr_brick] == 0) { dprintf(1, ("curr_brick %Ix for object %Ix set to 0", curr_brick, (size_t)curr_object)); FATAL_GC_ERROR(); } ptrdiff_t i = curr_brick; while ((i >= ((ptrdiff_t) brick_of (heap_segment_mem (seg)))) && (brick_table[i] < 0)) { i = i + brick_table[i]; } if (i < ((ptrdiff_t)(brick_of (heap_segment_mem (seg))) - 1)) { dprintf (1, ("ptrdiff i: %Ix < brick_of (heap_segment_mem (seg)):%Ix - 1. 
curr_brick: %Ix", i, brick_of (heap_segment_mem (seg)), curr_brick)); FATAL_GC_ERROR(); } bCurrentBrickInvalid = FALSE; } else if (!heap_segment_read_only_p (seg)) { bCurrentBrickInvalid = TRUE; } } } if (bCurrentBrickInvalid) { if (curr_object == (brick_address(curr_brick) + brick_table[curr_brick] - 1)) { bCurrentBrickInvalid = FALSE; last_valid_brick = curr_brick; } } } if (*((uint8_t**)curr_object) != (uint8_t *) g_gc_pFreeObjectMethodTable) { #ifdef FEATURE_LOH_COMPACTION if ((curr_gen_num == loh_generation) && (prev_object != 0)) { assert (method_table (prev_object) == g_gc_pFreeObjectMethodTable); } #endif //FEATURE_LOH_COMPACTION total_objects_verified++; BOOL can_verify_deep = TRUE; #ifdef BACKGROUND_GC can_verify_deep = fgc_should_consider_object (curr_object, seg, consider_bgc_mark_p, check_current_sweep_p, check_saved_sweep_p); #endif //BACKGROUND_GC BOOL deep_verify_obj = can_verify_deep; if ((heap_verify_level & GCConfig::HEAPVERIFY_DEEP_ON_COMPACT) && !settings.compaction) deep_verify_obj = FALSE; ((CObjectHeader*)curr_object)->ValidateHeap(deep_verify_obj); if (can_verify_deep) { if (curr_gen_num > 0) { BOOL need_card_p = FALSE; if (contain_pointers_or_collectible (curr_object)) { dprintf (4, ("curr_object: %Ix", (size_t)curr_object)); size_t crd = card_of (curr_object); BOOL found_card_p = card_set_p (crd); #ifdef COLLECTIBLE_CLASS if (is_collectible(curr_object)) { uint8_t* class_obj = get_class_object (curr_object); if (check_need_card (class_obj, gen_num_for_cards, next_boundary, e_high)) { if (!found_card_p) { dprintf (1, ("Card not set, curr_object = [%Ix:%Ix pointing to class object %Ix", card_of (curr_object), (size_t)curr_object, class_obj)); FATAL_GC_ERROR(); } } } #endif //COLLECTIBLE_CLASS if (contain_pointers(curr_object)) { go_through_object_nostart (method_table(curr_object), curr_object, s, oo, { if (crd != card_of ((uint8_t*)oo)) { crd = card_of ((uint8_t*)oo); found_card_p = card_set_p (crd); need_card_p = FALSE; } if (*oo && check_need_card (*oo, gen_num_for_cards, next_boundary, e_high)) { need_card_p = TRUE; } if (need_card_p && !found_card_p) { dprintf (1, ("Card not set, curr_object = [%Ix:%Ix, %Ix:%Ix[", card_of (curr_object), (size_t)curr_object, card_of (curr_object+Align(s, align_const)), (size_t)(curr_object+Align(s, align_const)))); FATAL_GC_ERROR(); } } ); } if (need_card_p && !found_card_p) { dprintf (1, ("Card not set, curr_object = [%Ix:%Ix, %Ix:%Ix[", card_of (curr_object), (size_t)curr_object, card_of (curr_object + Align(s, align_const)), (size_t)(curr_object + Align(s, align_const)))); FATAL_GC_ERROR(); } } } total_objects_verified_deep++; } } prev_object = curr_object; prev_brick = curr_brick; curr_object = curr_object + Align(s, align_const); if (curr_object < prev_object) { dprintf (1, ("overflow because of a bad object size: %Ix size %Ix", prev_object, s)); FATAL_GC_ERROR(); } } if (curr_object > heap_segment_allocated(seg)) { dprintf (1, ("Verifiying Heap: curr_object: %Ix > heap_segment_allocated (seg: %Ix) %Ix", (size_t)curr_object, (size_t)seg, heap_segment_allocated (seg))); FATAL_GC_ERROR(); } seg = heap_segment_next_in_range (seg); } } #ifdef BACKGROUND_GC dprintf (2, ("(%s)(%s)(%s) total_objects_verified is %Id, total_objects_verified_deep is %Id", (settings.concurrent ? "BGC" : (gc_heap::background_running_p () ? "FGC" : "NGC")), (begin_gc_p ? "BEG" : "END"), ((current_c_gc_state == c_gc_state_planning) ? 
"in plan" : "not in plan"), total_objects_verified, total_objects_verified_deep)); if (current_c_gc_state != c_gc_state_planning) { assert (total_objects_verified == total_objects_verified_deep); } #endif //BACKGROUND_GC verify_free_lists(); #ifdef FEATURE_PREMORTEM_FINALIZATION finalize_queue->CheckFinalizerObjects(); #endif // FEATURE_PREMORTEM_FINALIZATION { // to be consistent with handle table APIs pass a ScanContext* // to provide the heap number. the SC isn't complete though so // limit its scope to handle table verification. ScanContext sc; sc.thread_number = heap_number; GCScan::VerifyHandleTable(max_generation, max_generation, &sc); } #ifdef MULTIPLE_HEAPS current_join->join(this, gc_join_verify_objects_done); if (current_join->joined()) #endif //MULTIPLE_HEAPS { GCToEEInterface::VerifySyncTableEntry(); #ifdef MULTIPLE_HEAPS current_join->restart(); #endif //MULTIPLE_HEAPS } #ifdef BACKGROUND_GC if (settings.concurrent) { verify_mark_array_cleared(); } dprintf (2,("GC%d(%s): Verifying heap - end", VolatileLoad(&settings.gc_index), (settings.concurrent ? "BGC" : (gc_heap::background_running_p() ? "FGC" : "NGC")))); #else dprintf (2,("GC#d: Verifying heap - end", VolatileLoad(&settings.gc_index))); #endif //BACKGROUND_GC } #endif //VERIFY_HEAP void GCHeap::ValidateObjectMember (Object* obj) { #ifdef VERIFY_HEAP size_t s = size (obj); uint8_t* o = (uint8_t*)obj; go_through_object_cl (method_table (obj), o, s, oo, { uint8_t* child_o = *oo; if (child_o) { //dprintf (3, ("VOM: m: %Ix obj %Ix", (size_t)child_o, o)); MethodTable *pMT = method_table (child_o); assert(pMT); if (!pMT->SanityCheck()) { dprintf (1, ("Bad member of %Ix %Ix", (size_t)oo, (size_t)child_o)); FATAL_GC_ERROR(); } } } ); #endif // VERIFY_HEAP } HRESULT GCHeap::StaticShutdown() { deleteGCShadow(); GCScan::GcRuntimeStructuresValid (FALSE); // Cannot assert this, since we use SuspendEE as the mechanism to quiesce all // threads except the one performing the shutdown. // ASSERT( !GcInProgress ); // Guard against any more GC occurring and against any threads blocking // for GC to complete when the GC heap is gone. This fixes a race condition // where a thread in GC is destroyed as part of process destruction and // the remaining threads block for GC complete. 
    //GCTODO
    //EnterAllocLock();
    //Enter();
    //EnterFinalizeLock();
    //SetGCDone();

    // during shutdown a lot of threads are suspended
    // on this event, we don't want to wake them up just yet
    //CloseHandle (WaitForGCEvent);

    //find out if the global card table hasn't been used yet
    uint32_t* ct = &g_gc_card_table[card_word (gcard_of (g_gc_lowest_address))];
    if (card_table_refcount (ct) == 0)
    {
        destroy_card_table (ct);
        g_gc_card_table = nullptr;

#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
        g_gc_card_bundle_table = nullptr;
#endif
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
        SoftwareWriteWatch::StaticClose();
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
    }

    //destroy all segments on the standby list
    while(gc_heap::segment_standby_list != 0)
    {
        heap_segment* next_seg = heap_segment_next (gc_heap::segment_standby_list);
#ifdef MULTIPLE_HEAPS
        (gc_heap::g_heaps[0])->delete_heap_segment (gc_heap::segment_standby_list, FALSE);
#else //MULTIPLE_HEAPS
        pGenGCHeap->delete_heap_segment (gc_heap::segment_standby_list, FALSE);
#endif //MULTIPLE_HEAPS
        gc_heap::segment_standby_list = next_seg;
    }

#ifdef MULTIPLE_HEAPS
    for (int i = 0; i < gc_heap::n_heaps; i ++)
    {
        delete gc_heap::g_heaps[i]->vm_heap;
        //destroy pure GC stuff
        gc_heap::destroy_gc_heap (gc_heap::g_heaps[i]);
    }
#else
    gc_heap::destroy_gc_heap (pGenGCHeap);
#endif //MULTIPLE_HEAPS
    gc_heap::shutdown_gc();

    return S_OK;
}

// Wait until a garbage collection is complete
// returns NOERROR if wait was OK, other error code if failure.
// WARNING: This will not undo the must complete state. If you are
// in a must complete when you call this, you'd better know what you're
// doing.

#ifdef FEATURE_PREMORTEM_FINALIZATION
static HRESULT AllocateCFinalize(CFinalize **pCFinalize)
{
    *pCFinalize = new (nothrow) CFinalize();
    if (*pCFinalize == NULL || !(*pCFinalize)->Initialize())
        return E_OUTOFMEMORY;

    return S_OK;
}
#endif // FEATURE_PREMORTEM_FINALIZATION

// init the instance heap
HRESULT GCHeap::Init(size_t hn)
{
    HRESULT hres = S_OK;

#ifdef MULTIPLE_HEAPS
    if ((pGenGCHeap = gc_heap::make_gc_heap(this, (int)hn)) == 0)
        hres = E_OUTOFMEMORY;
#else
    UNREFERENCED_PARAMETER(hn);
    if (!gc_heap::make_gc_heap())
        hres = E_OUTOFMEMORY;
#endif //MULTIPLE_HEAPS

    // Failed.
return hres; } //System wide initialization HRESULT GCHeap::Initialize() { HRESULT hr = S_OK; qpf = (uint64_t)GCToOSInterface::QueryPerformanceFrequency(); qpf_ms = 1000.0 / (double)qpf; qpf_us = 1000.0 * 1000.0 / (double)qpf; g_gc_pFreeObjectMethodTable = GCToEEInterface::GetFreeObjectMethodTable(); g_num_processors = GCToOSInterface::GetTotalProcessorCount(); assert(g_num_processors != 0); gc_heap::total_physical_mem = (size_t)GCConfig::GetGCTotalPhysicalMemory(); if (gc_heap::total_physical_mem != 0) { gc_heap::is_restricted_physical_mem = true; #ifdef FEATURE_EVENT_TRACE gc_heap::physical_memory_from_config = (size_t)gc_heap::total_physical_mem; #endif //FEATURE_EVENT_TRACE } else { gc_heap::total_physical_mem = GCToOSInterface::GetPhysicalMemoryLimit (&gc_heap::is_restricted_physical_mem); } #ifdef USE_REGIONS gc_heap::regions_range = (size_t)GCConfig::GetGCRegionRange(); #endif //USE_REGIONS #ifdef HOST_64BIT gc_heap::heap_hard_limit = (size_t)GCConfig::GetGCHeapHardLimit(); gc_heap::heap_hard_limit_oh[soh] = (size_t)GCConfig::GetGCHeapHardLimitSOH(); gc_heap::heap_hard_limit_oh[loh] = (size_t)GCConfig::GetGCHeapHardLimitLOH(); gc_heap::heap_hard_limit_oh[poh] = (size_t)GCConfig::GetGCHeapHardLimitPOH(); gc_heap::use_large_pages_p = GCConfig::GetGCLargePages(); if (gc_heap::heap_hard_limit_oh[soh] || gc_heap::heap_hard_limit_oh[loh] || gc_heap::heap_hard_limit_oh[poh]) { if (!gc_heap::heap_hard_limit_oh[soh]) { return CLR_E_GC_BAD_HARD_LIMIT; } if (!gc_heap::heap_hard_limit_oh[loh]) { return CLR_E_GC_BAD_HARD_LIMIT; } gc_heap::heap_hard_limit = gc_heap::heap_hard_limit_oh[soh] + gc_heap::heap_hard_limit_oh[loh] + gc_heap::heap_hard_limit_oh[poh]; } else { uint32_t percent_of_mem_soh = (uint32_t)GCConfig::GetGCHeapHardLimitSOHPercent(); uint32_t percent_of_mem_loh = (uint32_t)GCConfig::GetGCHeapHardLimitLOHPercent(); uint32_t percent_of_mem_poh = (uint32_t)GCConfig::GetGCHeapHardLimitPOHPercent(); if (percent_of_mem_soh || percent_of_mem_loh || percent_of_mem_poh) { if ((percent_of_mem_soh <= 0) || (percent_of_mem_soh >= 100)) { return CLR_E_GC_BAD_HARD_LIMIT; } if ((percent_of_mem_loh <= 0) || (percent_of_mem_loh >= 100)) { return CLR_E_GC_BAD_HARD_LIMIT; } else if ((percent_of_mem_poh < 0) || (percent_of_mem_poh >= 100)) { return CLR_E_GC_BAD_HARD_LIMIT; } if ((percent_of_mem_soh + percent_of_mem_loh + percent_of_mem_poh) >= 100) { return CLR_E_GC_BAD_HARD_LIMIT; } gc_heap::heap_hard_limit_oh[soh] = (size_t)(gc_heap::total_physical_mem * (uint64_t)percent_of_mem_soh / (uint64_t)100); gc_heap::heap_hard_limit_oh[loh] = (size_t)(gc_heap::total_physical_mem * (uint64_t)percent_of_mem_loh / (uint64_t)100); gc_heap::heap_hard_limit_oh[poh] = (size_t)(gc_heap::total_physical_mem * (uint64_t)percent_of_mem_poh / (uint64_t)100); gc_heap::heap_hard_limit = gc_heap::heap_hard_limit_oh[soh] + gc_heap::heap_hard_limit_oh[loh] + gc_heap::heap_hard_limit_oh[poh]; } } if (gc_heap::heap_hard_limit_oh[soh] && (!gc_heap::heap_hard_limit_oh[poh]) && (!gc_heap::use_large_pages_p)) { return CLR_E_GC_BAD_HARD_LIMIT; } if (!(gc_heap::heap_hard_limit)) { uint32_t percent_of_mem = (uint32_t)GCConfig::GetGCHeapHardLimitPercent(); if ((percent_of_mem > 0) && (percent_of_mem < 100)) { gc_heap::heap_hard_limit = (size_t)(gc_heap::total_physical_mem * (uint64_t)percent_of_mem / (uint64_t)100); } } // If the hard limit is specified, the user is saying even if the process is already // running in a container, use this limit for the GC heap. 
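    // (Editorial note, illustrative only - the numbers below are hypothetical and
    //  not from the original source. For the percentage-based limits computed
    //  above, a machine with 64 GB of physical memory and
    //  GCHeapHardLimitSOHPercent=50, LOHPercent=20, POHPercent=10 would get
    //    heap_hard_limit_oh[soh] = 64 GB * 50 / 100 = 32 GB
    //    heap_hard_limit_oh[loh] = 64 GB * 20 / 100 ~ 12.8 GB
    //    heap_hard_limit_oh[poh] = 64 GB * 10 / 100 ~ 6.4 GB
    //  and heap_hard_limit is their sum. The SOH and LOH percentages must lie
    //  strictly between 0 and 100, POH may be 0, and the three together must sum
    //  to less than 100, otherwise CLR_E_GC_BAD_HARD_LIMIT is returned above.)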
if (gc_heap::heap_hard_limit) { #ifdef FEATURE_EVENT_TRACE gc_heap::hard_limit_config_p = true; #endif //FEATURE_EVENT_TRACE } else { if (gc_heap::is_restricted_physical_mem) { uint64_t physical_mem_for_gc = gc_heap::total_physical_mem * (uint64_t)75 / (uint64_t)100; gc_heap::heap_hard_limit = (size_t)max ((20 * 1024 * 1024), physical_mem_for_gc); } } if ((!gc_heap::heap_hard_limit) && gc_heap::use_large_pages_p) { return CLR_E_GC_LARGE_PAGE_MISSING_HARD_LIMIT; } #endif //HOST_64BIT uint32_t nhp = 1; uint32_t nhp_from_config = 0; #ifdef MULTIPLE_HEAPS AffinitySet config_affinity_set; GCConfigStringHolder cpu_index_ranges_holder(GCConfig::GetGCHeapAffinitizeRanges()); if (!ParseGCHeapAffinitizeRanges(cpu_index_ranges_holder.Get(), &config_affinity_set)) { return CLR_E_GC_BAD_AFFINITY_CONFIG_FORMAT; } uintptr_t config_affinity_mask = static_cast<uintptr_t>(GCConfig::GetGCHeapAffinitizeMask()); const AffinitySet* process_affinity_set = GCToOSInterface::SetGCThreadsAffinitySet(config_affinity_mask, &config_affinity_set); if (process_affinity_set->IsEmpty()) { return CLR_E_GC_BAD_AFFINITY_CONFIG; } if ((cpu_index_ranges_holder.Get() != nullptr) #ifdef TARGET_WINDOWS || (config_affinity_mask != 0) #endif ) { affinity_config_specified_p = true; } nhp_from_config = static_cast<uint32_t>(GCConfig::GetHeapCount()); g_num_active_processors = GCToEEInterface::GetCurrentProcessCpuCount(); if (nhp_from_config) { // Even when the user specifies a heap count, it should not be more // than the number of procs this process can use. nhp_from_config = min (nhp_from_config, g_num_active_processors); } nhp = ((nhp_from_config == 0) ? g_num_active_processors : nhp_from_config); nhp = min (nhp, MAX_SUPPORTED_CPUS); gc_heap::gc_thread_no_affinitize_p = (gc_heap::heap_hard_limit ? !affinity_config_specified_p : (GCConfig::GetNoAffinitize() != 0)); if (!(gc_heap::gc_thread_no_affinitize_p)) { uint32_t num_affinitized_processors = (uint32_t)process_affinity_set->Count(); if (num_affinitized_processors != 0) { nhp = min(nhp, num_affinitized_processors); } } #endif //MULTIPLE_HEAPS size_t seg_size = 0; size_t large_seg_size = 0; size_t pin_seg_size = 0; #ifndef USE_REGIONS if (gc_heap::heap_hard_limit) { if (gc_heap::heap_hard_limit_oh[soh]) { #ifdef MULTIPLE_HEAPS if (nhp_from_config == 0) { for (int i = 0; i < (total_oh_count - 1); i++) { if (i == poh && gc_heap::heap_hard_limit_oh[poh] == 0) { // if size 0 was specified for POH, ignore it for the nhp computation continue; } uint32_t nhp_oh = (uint32_t)(gc_heap::heap_hard_limit_oh[i] / min_segment_size_hard_limit); nhp = min (nhp, nhp_oh); } if (nhp == 0) { nhp = 1; } } #endif seg_size = gc_heap::heap_hard_limit_oh[soh] / nhp; large_seg_size = gc_heap::heap_hard_limit_oh[loh] / nhp; pin_seg_size = (gc_heap::heap_hard_limit_oh[poh] != 0) ? (gc_heap::heap_hard_limit_oh[2] / nhp) : min_segment_size_hard_limit; size_t aligned_seg_size = align_on_segment_hard_limit (seg_size); size_t aligned_large_seg_size = align_on_segment_hard_limit (large_seg_size); size_t aligned_pin_seg_size = align_on_segment_hard_limit (pin_seg_size); if (!gc_heap::use_large_pages_p) { aligned_seg_size = round_up_power2 (aligned_seg_size); aligned_large_seg_size = round_up_power2 (aligned_large_seg_size); aligned_pin_seg_size = round_up_power2 (aligned_pin_seg_size); } size_t seg_size_from_config = (size_t)GCConfig::GetSegmentSize(); if (seg_size_from_config) { size_t aligned_seg_size_config = (gc_heap::use_large_pages_p ? 
align_on_segment_hard_limit (seg_size) : round_up_power2 (seg_size_from_config)); aligned_seg_size = max (aligned_seg_size, aligned_seg_size_config); aligned_large_seg_size = max (aligned_large_seg_size, aligned_seg_size_config); aligned_pin_seg_size = max (aligned_pin_seg_size, aligned_seg_size_config); } seg_size = aligned_seg_size; gc_heap::soh_segment_size = seg_size; large_seg_size = aligned_large_seg_size; pin_seg_size = aligned_pin_seg_size; } else { seg_size = gc_heap::get_segment_size_hard_limit (&nhp, (nhp_from_config == 0)); gc_heap::soh_segment_size = seg_size; large_seg_size = gc_heap::use_large_pages_p ? seg_size : seg_size * 2; pin_seg_size = large_seg_size; } if (gc_heap::use_large_pages_p) gc_heap::min_segment_size = min_segment_size_hard_limit; } else { seg_size = get_valid_segment_size(); gc_heap::soh_segment_size = seg_size; large_seg_size = get_valid_segment_size (TRUE); pin_seg_size = large_seg_size; } assert (g_theGCHeap->IsValidSegmentSize (seg_size)); assert (g_theGCHeap->IsValidSegmentSize (large_seg_size)); assert (g_theGCHeap->IsValidSegmentSize (pin_seg_size)); dprintf (1, ("%d heaps, soh seg size: %Id mb, loh: %Id mb\n", nhp, (seg_size / (size_t)1024 / 1024), (large_seg_size / 1024 / 1024))); gc_heap::min_uoh_segment_size = min (large_seg_size, pin_seg_size); if (gc_heap::min_segment_size == 0) { gc_heap::min_segment_size = min (seg_size, gc_heap::min_uoh_segment_size); } #endif //!USE_REGIONS #ifdef USE_REGIONS // REGIONS TODO: // soh_segment_size is used by a few places, I'm setting it temporarily and will // get rid of it. gc_heap::soh_segment_size = INITIAL_ALLOC; #ifdef MULTIPLE_HEAPS gc_heap::soh_segment_size /= 4; #endif //MULTIPLE_HEAPS size_t gc_region_size = (size_t)GCConfig::GetGCRegionSize(); if (!power_of_two_p(gc_region_size) || ((gc_region_size * nhp * 19) > gc_heap::regions_range)) { return E_OUTOFMEMORY; } gc_heap::min_segment_size_shr = index_of_highest_set_bit (gc_region_size); #else gc_heap::min_segment_size_shr = index_of_highest_set_bit (gc_heap::min_segment_size); #endif //USE_REGIONS #ifdef MULTIPLE_HEAPS gc_heap::n_heaps = nhp; hr = gc_heap::initialize_gc (seg_size, large_seg_size, pin_seg_size, nhp); #else hr = gc_heap::initialize_gc (seg_size, large_seg_size, pin_seg_size); #endif //MULTIPLE_HEAPS if (hr != S_OK) return hr; gc_heap::mem_one_percent = gc_heap::total_physical_mem / 100; #ifndef MULTIPLE_HEAPS gc_heap::mem_one_percent /= g_num_processors; #endif //!MULTIPLE_HEAPS uint32_t highmem_th_from_config = (uint32_t)GCConfig::GetGCHighMemPercent(); if (highmem_th_from_config) { gc_heap::high_memory_load_th = min (99, highmem_th_from_config); gc_heap::v_high_memory_load_th = min (99, (highmem_th_from_config + 7)); #ifdef FEATURE_EVENT_TRACE gc_heap::high_mem_percent_from_config = highmem_th_from_config; #endif //FEATURE_EVENT_TRACE } else { // We should only use this if we are in the "many process" mode which really is only applicable // to very powerful machines - before that's implemented, temporarily I am only enabling this for 80GB+ memory. // For now I am using an estimate to calculate these numbers but this should really be obtained // programmatically going forward. // I am assuming 47 processes using WKS GC and 3 using SVR GC. // I am assuming 3 in part due to the "very high memory load" is 97%. 
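        // (Editorial note, illustrative only - hypothetical numbers, not from the
        //  original comment. On a 128 GB machine with 32 logical processors the
        //  branch below computes
        //    adjusted_available_mem_th = 3 + (int)(47.0f / 32) = 4
        //    available_mem_th          = min (10, 4) = 4
        //  so high_memory_load_th becomes 100 - 4 = 96 while
        //  v_high_memory_load_th stays at 97.)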
int available_mem_th = 10; if (gc_heap::total_physical_mem >= ((uint64_t)80 * 1024 * 1024 * 1024)) { int adjusted_available_mem_th = 3 + (int)((float)47 / (float)(GCToOSInterface::GetTotalProcessorCount())); available_mem_th = min (available_mem_th, adjusted_available_mem_th); } gc_heap::high_memory_load_th = 100 - available_mem_th; gc_heap::v_high_memory_load_th = 97; } gc_heap::m_high_memory_load_th = min ((gc_heap::high_memory_load_th + 5), gc_heap::v_high_memory_load_th); gc_heap::pm_stress_on = (GCConfig::GetGCProvModeStress() != 0); #if defined(HOST_64BIT) gc_heap::youngest_gen_desired_th = gc_heap::mem_one_percent; #endif // HOST_64BIT WaitForGCEvent = new (nothrow) GCEvent; if (!WaitForGCEvent) { return E_OUTOFMEMORY; } if (!WaitForGCEvent->CreateManualEventNoThrow(TRUE)) { return E_FAIL; } #ifndef FEATURE_REDHAWK // Redhawk forces relocation a different way #if defined (STRESS_HEAP) && !defined (MULTIPLE_HEAPS) if (GCStress<cfg_any>::IsEnabled()) { for (int i = 0; i < GCHeap::NUM_HEAP_STRESS_OBJS; i++) { m_StressObjs[i] = CreateGlobalHandle(0); } m_CurStressObj = 0; } #endif //STRESS_HEAP && !MULTIPLE_HEAPS #endif // FEATURE_REDHAWK initGCShadow(); // If we are debugging write barriers, initialize heap shadow #ifdef MULTIPLE_HEAPS for (uint32_t i = 0; i < nhp; i++) { GCHeap* Hp = new (nothrow) GCHeap(); if (!Hp) return E_OUTOFMEMORY; if ((hr = Hp->Init (i))!= S_OK) { return hr; } } heap_select::init_numa_node_to_heap_map (nhp); // If we have more active processors than heaps we still want to initialize some of the // mapping for the rest of the active processors because user threads can still run on // them which means it's important to know their numa nodes and map them to a reasonable // heap, ie, we wouldn't want to have all such procs go to heap 0. if (g_num_active_processors > nhp) heap_select::distribute_other_procs(); gc_heap* hp = gc_heap::g_heaps[0]; dynamic_data* gen0_dd = hp->dynamic_data_of (0); gc_heap::min_gen0_balance_delta = (dd_min_size (gen0_dd) >> 3); #ifdef HEAP_BALANCE_INSTRUMENTATION cpu_group_enabled_p = GCToOSInterface::CanEnableGCCPUGroups(); if (!GCToOSInterface::GetNumaInfo (&total_numa_nodes_on_machine, &procs_per_numa_node)) { total_numa_nodes_on_machine = 1; // Note that if we are in cpu groups we need to take the way proc index is calculated // into consideration. It would mean we have more than 64 procs on one numa node - // this is mostly for testing (if we want to simulate no numa on a numa system). // see vm\gcenv.os.cpp GroupProcNo implementation. 
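        // (Editorial note on the formula below: with CPU groups enabled the
        //  per-NUMA-node proc count is over-estimated as
        //    procs_per_cpu_group + 64 * (total_cpu_groups_on_machine - 1)
        //  - the 64 comes from the << 6 shift - so that a GroupProcNo-style index
        //  from any group still fits in the per-node array; e.g. 2 groups of 40
        //  procs each would reserve 40 + 64 = 104 slots.)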
if (GCToOSInterface::GetCPUGroupInfo (&total_cpu_groups_on_machine, &procs_per_cpu_group)) procs_per_numa_node = procs_per_cpu_group + ((total_cpu_groups_on_machine - 1) << 6); else procs_per_numa_node = g_num_processors; } hb_info_numa_nodes = new (nothrow) heap_balance_info_numa[total_numa_nodes_on_machine]; dprintf (HEAP_BALANCE_LOG, ("total: %d, numa: %d", g_num_processors, total_numa_nodes_on_machine)); int hb_info_size_per_proc = sizeof (heap_balance_info_proc); for (int numa_node_index = 0; numa_node_index < total_numa_nodes_on_machine; numa_node_index++) { int hb_info_size_per_node = hb_info_size_per_proc * procs_per_numa_node; uint8_t* numa_mem = (uint8_t*)GCToOSInterface::VirtualReserve (hb_info_size_per_node, 0, 0, numa_node_index); if (!numa_mem) return E_FAIL; if (!GCToOSInterface::VirtualCommit (numa_mem, hb_info_size_per_node, numa_node_index)) return E_FAIL; heap_balance_info_proc* hb_info_procs = (heap_balance_info_proc*)numa_mem; hb_info_numa_nodes[numa_node_index].hb_info_procs = hb_info_procs; for (int proc_index = 0; proc_index < (int)procs_per_numa_node; proc_index++) { heap_balance_info_proc* hb_info_proc = &hb_info_procs[proc_index]; hb_info_proc->count = default_max_hb_heap_balance_info; hb_info_proc->index = 0; } } #endif //HEAP_BALANCE_INSTRUMENTATION #else hr = Init (0); #endif //MULTIPLE_HEAPS #ifdef USE_REGIONS if (initial_regions) { delete[] initial_regions; } #endif //USE_REGIONS if (hr == S_OK) { GCScan::GcRuntimeStructuresValid (TRUE); GCToEEInterface::DiagUpdateGenerationBounds(); #if defined(STRESS_REGIONS) && defined(FEATURE_BASICFREEZE) #ifdef MULTIPLE_HEAPS gc_heap* hp = gc_heap::g_heaps[0]; #else gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS // allocate some artificial ro seg datastructures. for (int i = 0; i < 2; i++) { size_t ro_seg_size = 1024 * 1024; // I'm not allocating this within the normal reserved range // because ro segs are supposed to always be out of range // for regions. uint8_t* seg_mem = new (nothrow) uint8_t [ro_seg_size]; heap_segment* ro_seg = (heap_segment*) seg_mem; uint8_t* start = seg_mem + gc_heap::segment_info_size; heap_segment_mem (ro_seg) = start; heap_segment_used (ro_seg) = start; heap_segment_reserved (ro_seg) = seg_mem + ro_seg_size; heap_segment_committed (ro_seg) = heap_segment_reserved (ro_seg); gc_heap::init_heap_segment (ro_seg, hp, seg_mem, ro_seg_size, 2); ro_seg->flags = heap_segment_flags_readonly; hp->insert_ro_segment (ro_seg); } #endif //STRESS_REGIONS && FEATURE_BASICFREEZE } return hr; } //// // GC callback functions bool GCHeap::IsPromoted(Object* object) { #ifdef _DEBUG if (object) { ((CObjectHeader*)object)->Validate(); } #endif //_DEBUG uint8_t* o = (uint8_t*)object; if (gc_heap::settings.condemned_generation == max_generation) { #ifdef MULTIPLE_HEAPS gc_heap* hp = gc_heap::g_heaps[0]; #else gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS #ifdef BACKGROUND_GC if (gc_heap::settings.concurrent) { bool is_marked = (!((o < hp->background_saved_highest_address) && (o >= hp->background_saved_lowest_address))|| hp->background_marked (o)); return is_marked; } else #endif //BACKGROUND_GC { return (!((o < hp->highest_address) && (o >= hp->lowest_address)) || hp->is_mark_set (o)); } } else { #ifdef USE_REGIONS return (is_in_heap_range (o) ? (gc_heap::is_in_condemned_gc (o) ? 
gc_heap::is_mark_set (o) : true) : true); #else gc_heap* hp = gc_heap::heap_of (o); return (!((o < hp->gc_high) && (o >= hp->gc_low)) || hp->is_mark_set (o)); #endif //USE_REGIONS } } size_t GCHeap::GetPromotedBytes(int heap_index) { #ifdef BACKGROUND_GC if (gc_heap::settings.concurrent) { return gc_heap::bpromoted_bytes (heap_index); } else #endif //BACKGROUND_GC { gc_heap* hp = #ifdef MULTIPLE_HEAPS gc_heap::g_heaps[heap_index]; #else pGenGCHeap; #endif //MULTIPLE_HEAPS return hp->get_promoted_bytes(); } } void GCHeap::SetYieldProcessorScalingFactor (float scalingFactor) { assert (yp_spin_count_unit != 0); int saved_yp_spin_count_unit = yp_spin_count_unit; yp_spin_count_unit = (int)((float)yp_spin_count_unit * scalingFactor / (float)9); // It's very suspicious if it becomes 0 if (yp_spin_count_unit == 0) { yp_spin_count_unit = saved_yp_spin_count_unit; } } unsigned int GCHeap::WhichGeneration (Object* object) { uint8_t* o = (uint8_t*)object; #ifdef FEATURE_BASICFREEZE if (!((o < g_gc_highest_address) && (o >= g_gc_lowest_address))) { return max_generation; } #endif //FEATURE_BASICFREEZE gc_heap* hp = gc_heap::heap_of (o); unsigned int g = hp->object_gennum (o); dprintf (3, ("%Ix is in gen %d", (size_t)object, g)); return g; } unsigned int GCHeap::GetGenerationWithRange (Object* object, uint8_t** ppStart, uint8_t** ppAllocated, uint8_t** ppReserved) { int generation = -1; heap_segment * hs = gc_heap::find_segment ((uint8_t*)object, FALSE); #ifdef USE_REGIONS generation = heap_segment_gen_num (hs); if (generation == max_generation) { if (heap_segment_loh_p (hs)) { generation = loh_generation; } else if (heap_segment_poh_p (hs)) { generation = poh_generation; } } *ppStart = heap_segment_mem (hs); *ppAllocated = heap_segment_allocated (hs); *ppReserved = heap_segment_reserved (hs); #else #ifdef MULTIPLE_HEAPS gc_heap* hp = heap_segment_heap (hs); #else gc_heap* hp = __this; #endif //MULTIPLE_HEAPS if (hs == hp->ephemeral_heap_segment) { uint8_t* reserved = heap_segment_reserved (hs); uint8_t* end = heap_segment_allocated(hs); for (int gen = 0; gen < max_generation; gen++) { uint8_t* start = generation_allocation_start (hp->generation_of (gen)); if ((uint8_t*)object >= start) { generation = gen; *ppStart = start; *ppAllocated = end; *ppReserved = reserved; break; } end = reserved = start; } if (generation == -1) { generation = max_generation; *ppStart = heap_segment_mem (hs); *ppAllocated = *ppReserved = generation_allocation_start (hp->generation_of (max_generation - 1)); } } else { generation = max_generation; if (heap_segment_loh_p (hs)) { generation = loh_generation; } else if (heap_segment_poh_p (hs)) { generation = poh_generation; } *ppStart = heap_segment_mem (hs); *ppAllocated = heap_segment_allocated (hs); *ppReserved = heap_segment_reserved (hs); } #endif //USE_REGIONS return (unsigned int)generation; } bool GCHeap::IsEphemeral (Object* object) { uint8_t* o = (uint8_t*)object; gc_heap* hp = gc_heap::heap_of (o); return !!hp->ephemeral_pointer_p (o); } // Return NULL if can't find next object. 
When EE is not suspended, // the result is not accurate: if the input arg is in gen0, the function could // return zeroed out memory as next object Object * GCHeap::NextObj (Object * object) { #ifdef VERIFY_HEAP uint8_t* o = (uint8_t*)object; #ifndef FEATURE_BASICFREEZE if (!((o < g_gc_highest_address) && (o >= g_gc_lowest_address))) { return NULL; } #endif //!FEATURE_BASICFREEZE heap_segment * hs = gc_heap::find_segment (o, FALSE); if (!hs) { return NULL; } BOOL large_object_p = heap_segment_uoh_p (hs); if (large_object_p) return NULL; //could be racing with another core allocating. #ifdef MULTIPLE_HEAPS gc_heap* hp = heap_segment_heap (hs); #else //MULTIPLE_HEAPS gc_heap* hp = 0; #endif //MULTIPLE_HEAPS #ifdef USE_REGIONS unsigned int g = heap_segment_gen_num (hs); #else unsigned int g = hp->object_gennum ((uint8_t*)object); #endif if ((g == 0) && hp->settings.demotion) return NULL;//could be racing with another core allocating. int align_const = get_alignment_constant (!large_object_p); uint8_t* nextobj = o + Align (size (o), align_const); if (nextobj <= o) // either overflow or 0 sized object. { return NULL; } if ((nextobj < heap_segment_mem(hs)) || (nextobj >= heap_segment_allocated(hs) && hs != hp->ephemeral_heap_segment) || (nextobj >= hp->alloc_allocated)) { return NULL; } return (Object *)nextobj; #else return nullptr; #endif // VERIFY_HEAP } // returns TRUE if the pointer is in one of the GC heaps. bool GCHeap::IsHeapPointer (void* vpObject, bool small_heap_only) { uint8_t* object = (uint8_t*) vpObject; #ifndef FEATURE_BASICFREEZE if (!((object < g_gc_highest_address) && (object >= g_gc_lowest_address))) return FALSE; #endif //!FEATURE_BASICFREEZE heap_segment * hs = gc_heap::find_segment (object, small_heap_only); return !!hs; } void GCHeap::Promote(Object** ppObject, ScanContext* sc, uint32_t flags) { THREAD_NUMBER_FROM_CONTEXT; #ifndef MULTIPLE_HEAPS const int thread = 0; #endif //!MULTIPLE_HEAPS uint8_t* o = (uint8_t*)*ppObject; if (o == 0) return; #ifdef DEBUG_DestroyedHandleValue // we can race with destroy handle during concurrent scan if (o == (uint8_t*)DEBUG_DestroyedHandleValue) return; #endif //DEBUG_DestroyedHandleValue HEAP_FROM_THREAD; gc_heap* hp = gc_heap::heap_of (o); #ifdef USE_REGIONS if (!gc_heap::is_in_condemned (o)) #else //USE_REGIONS if ((o < hp->gc_low) || (o >= hp->gc_high)) #endif //USE_REGIONS { return; } dprintf (3, ("Promote %Ix", (size_t)o)); if (flags & GC_CALL_INTERIOR) { if ((o = hp->find_object (o)) == 0) { return; } } #ifdef FEATURE_CONSERVATIVE_GC // For conservative GC, a value on stack may point to middle of a free object. // In this case, we don't need to promote the pointer. if (GCConfig::GetConservativeGC() && ((CObjectHeader*)o)->IsFree()) { return; } #endif #ifdef _DEBUG ((CObjectHeader*)o)->Validate(); #else UNREFERENCED_PARAMETER(sc); #endif //_DEBUG if (flags & GC_CALL_PINNED) hp->pin_object (o, (uint8_t**) ppObject); #ifdef STRESS_PINNING if ((++n_promote % 20) == 1) hp->pin_object (o, (uint8_t**) ppObject); #endif //STRESS_PINNING hpt->mark_object_simple (&o THREAD_NUMBER_ARG); STRESS_LOG_ROOT_PROMOTE(ppObject, o, o ? 
header(o)->GetMethodTable() : NULL); } void GCHeap::Relocate (Object** ppObject, ScanContext* sc, uint32_t flags) { UNREFERENCED_PARAMETER(sc); uint8_t* object = (uint8_t*)(Object*)(*ppObject); THREAD_NUMBER_FROM_CONTEXT; //dprintf (3, ("Relocate location %Ix\n", (size_t)ppObject)); dprintf (3, ("R: %Ix", (size_t)ppObject)); if (!object || !((object >= g_gc_lowest_address) && (object < g_gc_highest_address))) return; gc_heap* hp = gc_heap::heap_of (object); #ifdef _DEBUG if (!(flags & GC_CALL_INTERIOR)) { // We cannot validate this object if it's in the condemned gen because it could // be one of the objects that were overwritten by an artificial gap due to a pinned plug. #ifdef USE_REGIONS if (!gc_heap::is_in_condemned_gc (object)) #else //USE_REGIONS if (!((object >= hp->gc_low) && (object < hp->gc_high))) #endif //USE_REGIONS { ((CObjectHeader*)object)->Validate(FALSE); } } #endif //_DEBUG dprintf (3, ("Relocate %Ix\n", (size_t)object)); uint8_t* pheader; if ((flags & GC_CALL_INTERIOR) && gc_heap::settings.loh_compaction) { #ifdef USE_REGIONS if (!gc_heap::is_in_condemned_gc (object)) #else //USE_REGIONS if (!((object >= hp->gc_low) && (object < hp->gc_high))) #endif //USE_REGIONS { return; } if (gc_heap::loh_object_p (object)) { pheader = hp->find_object (object); if (pheader == 0) { return; } ptrdiff_t ref_offset = object - pheader; hp->relocate_address(&pheader THREAD_NUMBER_ARG); *ppObject = (Object*)(pheader + ref_offset); return; } } { pheader = object; hp->relocate_address(&pheader THREAD_NUMBER_ARG); *ppObject = (Object*)pheader; } STRESS_LOG_ROOT_RELOCATE(ppObject, object, pheader, ((!(flags & GC_CALL_INTERIOR)) ? ((Object*)object)->GetGCSafeMethodTable() : 0)); } /*static*/ bool GCHeap::IsLargeObject(Object *pObj) { return size( pObj ) >= loh_size_threshold; } #ifndef FEATURE_REDHAWK // Redhawk forces relocation a different way #ifdef STRESS_HEAP void StressHeapDummy (); // CLRRandom implementation can produce FPU exceptions if // the test/application run by CLR is enabling any FPU exceptions. // We want to avoid any unexpected exception coming from stress // infrastructure, so CLRRandom is not an option. // The code below is a replicate of CRT rand() implementation. // Using CRT rand() is not an option because we will interfere with the user application // that may also use it. int StressRNG(int iMaxValue) { static BOOL bisRandInit = FALSE; static int lHoldrand = 1L; if (!bisRandInit) { lHoldrand = (int)time(NULL); bisRandInit = TRUE; } int randValue = (((lHoldrand = lHoldrand * 214013L + 2531011L) >> 16) & 0x7fff); return randValue % iMaxValue; } #endif // STRESS_HEAP #endif // !FEATURE_REDHAWK // free up object so that things will move and then do a GC //return TRUE if GC actually happens, otherwise FALSE bool GCHeap::StressHeap(gc_alloc_context * context) { #if defined(STRESS_HEAP) && !defined(FEATURE_REDHAWK) alloc_context* acontext = static_cast<alloc_context*>(context); assert(context != nullptr); // if GC stress was dynamically disabled during this run we return FALSE if (!GCStressPolicy::IsEnabled()) return FALSE; #ifdef _DEBUG if (g_pConfig->FastGCStressLevel() && !GCToEEInterface::GetThread()->StressHeapIsEnabled()) { return FALSE; } #endif //_DEBUG if ((g_pConfig->GetGCStressLevel() & EEConfig::GCSTRESS_UNIQUE) #ifdef _DEBUG || g_pConfig->FastGCStressLevel() > 1 #endif //_DEBUG ) { if (!Thread::UniqueStack(&acontext)) { return FALSE; } } #ifdef BACKGROUND_GC // don't trigger a GC from the GC threads but still trigger GCs from user threads. 
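    // (Editorial note: the check below bails out for threads that the GC itself
    //  created - e.g. background GC worker threads - so that stress-induced
    //  collections are only triggered from user threads, as the comment above says.)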
    if (GCToEEInterface::WasCurrentThreadCreatedByGC())
    {
        return FALSE;
    }
#endif //BACKGROUND_GC

    if (g_pStringClass == 0)
    {
        // If the String class has not been loaded, don't do any stressing. This should
        // be kept to a minimum to get as complete coverage as possible.
        _ASSERTE(g_fEEInit);
        return FALSE;
    }

#ifndef MULTIPLE_HEAPS
    static int32_t OneAtATime = -1;

    // Only bother with this if the stress level is big enough and if nobody else is
    // doing it right now. Note that some callers are inside the AllocLock and are
    // guaranteed synchronized. But others are using AllocationContexts and have no
    // particular synchronization.
    //
    // For this latter case, we want a very high-speed way of limiting this to one
    // at a time. A secondary advantage is that we release part of our StressObjs
    // buffer sparingly but just as effectively.

    if (Interlocked::Increment(&OneAtATime) == 0 &&
        !TrackAllocations()) // Messing with object sizes can confuse the profiler (see ICorProfilerInfo::GetObjectSize)
    {
        StringObject* str;

        // If the current string is used up
        if (HndFetchHandle(m_StressObjs[m_CurStressObj]) == 0)
        {
            // Populate handles with strings
            int i = m_CurStressObj;
            while(HndFetchHandle(m_StressObjs[i]) == 0)
            {
                _ASSERTE(m_StressObjs[i] != 0);
                unsigned strLen = ((unsigned)loh_size_threshold - 32) / sizeof(WCHAR);
                unsigned strSize = PtrAlign(StringObject::GetSize(strLen));

                // update the cached type handle before allocating
                SetTypeHandleOnThreadForAlloc(TypeHandle(g_pStringClass));
                str = (StringObject*) pGenGCHeap->allocate (strSize, acontext, /*flags*/ 0);
                if (str)
                {
                    str->SetMethodTable (g_pStringClass);
                    str->SetStringLength (strLen);
                    HndAssignHandle(m_StressObjs[i], ObjectToOBJECTREF(str));
                }
                i = (i + 1) % NUM_HEAP_STRESS_OBJS;
                if (i == m_CurStressObj) break;
            }

            // advance the current handle to the next string
            m_CurStressObj = (m_CurStressObj + 1) % NUM_HEAP_STRESS_OBJS;
        }

        // Get the current string
        str = (StringObject*) OBJECTREFToObject(HndFetchHandle(m_StressObjs[m_CurStressObj]));
        if (str)
        {
            // Chop off the end of the string and form a new object out of it.
            // This will 'free' an object at the beginning of the heap, which will
            // force data movement. Note that we can only do this so many times
            // before we have to move on to the next string.
            unsigned sizeOfNewObj = (unsigned)Align(min_obj_size * 31);
            if (str->GetStringLength() > sizeOfNewObj / sizeof(WCHAR))
            {
                unsigned sizeToNextObj = (unsigned)Align(size(str));
                uint8_t* freeObj = ((uint8_t*) str) + sizeToNextObj - sizeOfNewObj;
                pGenGCHeap->make_unused_array (freeObj, sizeOfNewObj);

#if !defined(TARGET_AMD64) && !defined(TARGET_X86)
                // ensure that the write to the new free object is seen by
                // background GC *before* the write to the string length below
                MemoryBarrier();
#endif

                str->SetStringLength(str->GetStringLength() - (sizeOfNewObj / sizeof(WCHAR)));
            }
            else
            {
                // Let the string itself become garbage.
// will be realloced next time around HndAssignHandle(m_StressObjs[m_CurStressObj], 0); } } } Interlocked::Decrement(&OneAtATime); #endif // !MULTIPLE_HEAPS if (IsConcurrentGCEnabled()) { int rgen = StressRNG(10); // gen0:gen1:gen2 distribution: 40:40:20 if (rgen >= 8) rgen = 2; else if (rgen >= 4) rgen = 1; else rgen = 0; GarbageCollectTry (rgen, FALSE, collection_gcstress); } else { GarbageCollect(max_generation, FALSE, collection_gcstress); } return TRUE; #else UNREFERENCED_PARAMETER(context); return FALSE; #endif //STRESS_HEAP && !FEATURE_REDHAWK } #ifdef FEATURE_PREMORTEM_FINALIZATION #define REGISTER_FOR_FINALIZATION(_object, _size) \ hp->finalize_queue->RegisterForFinalization (0, (_object), (_size)) #else // FEATURE_PREMORTEM_FINALIZATION #define REGISTER_FOR_FINALIZATION(_object, _size) true #endif // FEATURE_PREMORTEM_FINALIZATION #define CHECK_ALLOC_AND_POSSIBLY_REGISTER_FOR_FINALIZATION(_object, _size, _register) do { \ if ((_object) == NULL || ((_register) && !REGISTER_FOR_FINALIZATION(_object, _size))) \ { \ STRESS_LOG_OOM_STACK(_size); \ return NULL; \ } \ } while (false) #ifdef FEATURE_64BIT_ALIGNMENT // Allocate small object with an alignment requirement of 8-bytes. Object* AllocAlign8(alloc_context* acontext, gc_heap* hp, size_t size, uint32_t flags) { CONTRACTL { NOTHROW; GC_TRIGGERS; } CONTRACTL_END; Object* newAlloc = NULL; // Depending on where in the object the payload requiring 8-byte alignment resides we might have to // align the object header on an 8-byte boundary or midway between two such boundaries. The unaligned // case is indicated to the GC via the GC_ALLOC_ALIGN8_BIAS flag. size_t desiredAlignment = (flags & GC_ALLOC_ALIGN8_BIAS) ? 4 : 0; // Retrieve the address of the next allocation from the context (note that we're inside the alloc // lock at this point). uint8_t* result = acontext->alloc_ptr; // Will an allocation at this point yield the correct alignment and fit into the remainder of the // context? if ((((size_t)result & 7) == desiredAlignment) && ((result + size) <= acontext->alloc_limit)) { // Yes, we can just go ahead and make the allocation. newAlloc = (Object*) hp->allocate (size, acontext, flags); ASSERT(((size_t)newAlloc & 7) == desiredAlignment); } else { // No, either the next available address is not aligned in the way we require it or there's // not enough space to allocate an object of the required size. In both cases we allocate a // padding object (marked as a free object). This object's size is such that it will reverse // the alignment of the next header (asserted below). // // We allocate both together then decide based on the result whether we'll format the space as // free object + real object or real object + free object. ASSERT((Align(min_obj_size) & 7) == 4); CObjectHeader *freeobj = (CObjectHeader*) hp->allocate (Align(size) + Align(min_obj_size), acontext, flags); if (freeobj) { if (((size_t)freeobj & 7) == desiredAlignment) { // New allocation has desired alignment, return this one and place the free object at the // end of the allocated space. newAlloc = (Object*)freeobj; freeobj = (CObjectHeader*)((uint8_t*)freeobj + Align(size)); } else { // New allocation is still mis-aligned, format the initial space as a free object and the // rest of the space should be correctly aligned for the real object. newAlloc = (Object*)((uint8_t*)freeobj + Align(min_obj_size)); ASSERT(((size_t)newAlloc & 7) == desiredAlignment); if (flags & GC_ALLOC_ZEROING_OPTIONAL) { // clean the syncblock of the aligned object. 
*(((PTR_PTR)newAlloc)-1) = 0; } } freeobj->SetFree(min_obj_size); } } return newAlloc; } #endif // FEATURE_64BIT_ALIGNMENT Object* GCHeap::Alloc(gc_alloc_context* context, size_t size, uint32_t flags REQD_ALIGN_DCL) { CONTRACTL { NOTHROW; GC_TRIGGERS; } CONTRACTL_END; TRIGGERSGC(); Object* newAlloc = NULL; alloc_context* acontext = static_cast<alloc_context*>(context); #ifdef MULTIPLE_HEAPS if (acontext->get_alloc_heap() == 0) { AssignHeap (acontext); assert (acontext->get_alloc_heap()); } gc_heap* hp = acontext->get_alloc_heap()->pGenGCHeap; #else gc_heap* hp = pGenGCHeap; #ifdef _PREFAST_ // prefix complains about us dereferencing hp in wks build even though we only access static members // this way. not sure how to shut it up except for this ugly workaround: PREFIX_ASSUME(hp != NULL); #endif //_PREFAST_ #endif //MULTIPLE_HEAPS assert(size < loh_size_threshold || (flags & GC_ALLOC_LARGE_OBJECT_HEAP)); if (flags & GC_ALLOC_USER_OLD_HEAP) { // The LOH always guarantees at least 8-byte alignment, regardless of platform. Moreover it doesn't // support mis-aligned object headers so we can't support biased headers. Luckily for us // we've managed to arrange things so the only case where we see a bias is for boxed value types and // these can never get large enough to be allocated on the LOH. ASSERT((flags & GC_ALLOC_ALIGN8_BIAS) == 0); ASSERT(65536 < loh_size_threshold); int gen_num = (flags & GC_ALLOC_PINNED_OBJECT_HEAP) ? poh_generation : loh_generation; newAlloc = (Object*) hp->allocate_uoh_object (size + ComputeMaxStructAlignPadLarge(requiredAlignment), flags, gen_num, acontext->alloc_bytes_uoh); ASSERT(((size_t)newAlloc & 7) == 0); #ifdef MULTIPLE_HEAPS if (flags & GC_ALLOC_FINALIZE) { // the heap may have changed due to heap balancing - it's important // to register the object for finalization on the heap it was allocated on hp = gc_heap::heap_of ((uint8_t*)newAlloc); } #endif //MULTIPLE_HEAPS #ifdef FEATURE_STRUCTALIGN newAlloc = (Object*) hp->pad_for_alignment_large ((uint8_t*) newAlloc, requiredAlignment, size); #endif // FEATURE_STRUCTALIGN } else { #ifdef FEATURE_64BIT_ALIGNMENT if (flags & GC_ALLOC_ALIGN8) { newAlloc = AllocAlign8 (acontext, hp, size, flags); } else #else assert ((flags & GC_ALLOC_ALIGN8) == 0); #endif { newAlloc = (Object*) hp->allocate (size + ComputeMaxStructAlignPad(requiredAlignment), acontext, flags); } #ifdef MULTIPLE_HEAPS if (flags & GC_ALLOC_FINALIZE) { // the heap may have changed due to heap balancing - it's important // to register the object for finalization on the heap it was allocated on hp = acontext->get_alloc_heap()->pGenGCHeap; assert ((newAlloc == nullptr) || (hp == gc_heap::heap_of ((uint8_t*)newAlloc))); } #endif //MULTIPLE_HEAPS #ifdef FEATURE_STRUCTALIGN newAlloc = (Object*) hp->pad_for_alignment ((uint8_t*) newAlloc, requiredAlignment, size, acontext); #endif // FEATURE_STRUCTALIGN } CHECK_ALLOC_AND_POSSIBLY_REGISTER_FOR_FINALIZATION(newAlloc, size, flags & GC_ALLOC_FINALIZE); #ifdef USE_REGIONS assert (IsHeapPointer (newAlloc)); #endif //USE_REGIONS return newAlloc; } void GCHeap::FixAllocContext (gc_alloc_context* context, void* arg, void *heap) { alloc_context* acontext = static_cast<alloc_context*>(context); #ifdef MULTIPLE_HEAPS if (arg != 0) acontext->alloc_count = 0; uint8_t * alloc_ptr = acontext->alloc_ptr; if (!alloc_ptr) return; // The acontext->alloc_heap can be out of sync with the ptrs because // of heap re-assignment in allocate gc_heap* hp = gc_heap::heap_of (alloc_ptr); #else gc_heap* hp = pGenGCHeap; #endif 
//MULTIPLE_HEAPS if (heap == NULL || heap == hp) { hp->fix_allocation_context (acontext, ((arg != 0)? TRUE : FALSE), TRUE); } } Object* GCHeap::GetContainingObject (void *pInteriorPtr, bool fCollectedGenOnly) { uint8_t *o = (uint8_t*)pInteriorPtr; gc_heap* hp = gc_heap::heap_of (o); #ifdef USE_REGIONS if (fCollectedGenOnly) { if (!gc_heap::is_in_condemned (o)) { return NULL; } } else { if (!((o >= g_gc_lowest_address) && (o < g_gc_highest_address))) return NULL; } #else //USE_REGIONS uint8_t* lowest = (fCollectedGenOnly ? hp->gc_low : hp->lowest_address); uint8_t* highest = (fCollectedGenOnly ? hp->gc_high : hp->highest_address); if (!((o >= lowest) && (o < highest))) { return NULL; } #endif //USE_REGIONS return (Object*)(hp->find_object (o)); } BOOL should_collect_optimized (dynamic_data* dd, BOOL low_memory_p) { if (dd_new_allocation (dd) < 0) { return TRUE; } if (((float)(dd_new_allocation (dd)) / (float)dd_desired_allocation (dd)) < (low_memory_p ? 0.7 : 0.3)) { return TRUE; } return FALSE; } //---------------------------------------------------------------------------- // #GarbageCollector // // API to ensure that a complete new garbage collection takes place // HRESULT GCHeap::GarbageCollect (int generation, bool low_memory_p, int mode) { #if defined(HOST_64BIT) if (low_memory_p) { size_t total_allocated = 0; size_t total_desired = 0; #ifdef MULTIPLE_HEAPS int hn = 0; for (hn = 0; hn < gc_heap::n_heaps; hn++) { gc_heap* hp = gc_heap::g_heaps [hn]; total_desired += dd_desired_allocation (hp->dynamic_data_of (0)); total_allocated += dd_desired_allocation (hp->dynamic_data_of (0))- dd_new_allocation (hp->dynamic_data_of (0)); } #else gc_heap* hp = pGenGCHeap; total_desired = dd_desired_allocation (hp->dynamic_data_of (0)); total_allocated = dd_desired_allocation (hp->dynamic_data_of (0))- dd_new_allocation (hp->dynamic_data_of (0)); #endif //MULTIPLE_HEAPS if ((total_desired > gc_heap::mem_one_percent) && (total_allocated < gc_heap::mem_one_percent)) { dprintf (2, ("Async low mem but we've only allocated %d (< 10%% of physical mem) out of %d, returning", total_allocated, total_desired)); return S_OK; } } #endif // HOST_64BIT #ifdef MULTIPLE_HEAPS gc_heap* hpt = gc_heap::g_heaps[0]; #else gc_heap* hpt = 0; #endif //MULTIPLE_HEAPS generation = (generation < 0) ? 
max_generation : min (generation, max_generation); dynamic_data* dd = hpt->dynamic_data_of (generation); #ifdef BACKGROUND_GC if (gc_heap::background_running_p()) { if ((mode == collection_optimized) || (mode & collection_non_blocking)) { return S_OK; } if (mode & collection_blocking) { pGenGCHeap->background_gc_wait(); if (mode & collection_optimized) { return S_OK; } } } #endif //BACKGROUND_GC if (mode & collection_optimized) { if (pGenGCHeap->gc_started) { return S_OK; } else { BOOL should_collect = FALSE; BOOL should_check_uoh = (generation == max_generation); #ifdef MULTIPLE_HEAPS for (int heap_number = 0; heap_number < gc_heap::n_heaps; heap_number++) { dynamic_data* dd1 = gc_heap::g_heaps [heap_number]->dynamic_data_of (generation); should_collect = should_collect_optimized (dd1, low_memory_p); if (should_check_uoh) { for (int i = uoh_start_generation; i < total_generation_count && !should_collect; i++) { should_collect = should_collect_optimized (gc_heap::g_heaps [heap_number]->dynamic_data_of (i), low_memory_p); } } if (should_collect) break; } #else should_collect = should_collect_optimized (dd, low_memory_p); if (should_check_uoh) { for (int i = uoh_start_generation; i < total_generation_count && !should_collect; i++) { should_collect = should_collect_optimized (hpt->dynamic_data_of (i), low_memory_p); } } #endif //MULTIPLE_HEAPS if (!should_collect) { return S_OK; } } } size_t CollectionCountAtEntry = dd_collection_count (dd); size_t BlockingCollectionCountAtEntry = gc_heap::full_gc_counts[gc_type_blocking]; size_t CurrentCollectionCount = 0; retry: CurrentCollectionCount = GarbageCollectTry(generation, low_memory_p, mode); if ((mode & collection_blocking) && (generation == max_generation) && (gc_heap::full_gc_counts[gc_type_blocking] == BlockingCollectionCountAtEntry)) { #ifdef BACKGROUND_GC if (gc_heap::background_running_p()) { pGenGCHeap->background_gc_wait(); } #endif //BACKGROUND_GC goto retry; } if (CollectionCountAtEntry == CurrentCollectionCount) { goto retry; } return S_OK; } size_t GCHeap::GarbageCollectTry (int generation, BOOL low_memory_p, int mode) { int gen = (generation < 0) ? max_generation : min (generation, max_generation); gc_reason reason = reason_empty; if (low_memory_p) { if (mode & collection_blocking) { reason = reason_lowmemory_blocking; } else { reason = reason_lowmemory; } } else { reason = reason_induced; } if (reason == reason_induced) { if (mode & collection_compacting) { reason = reason_induced_compacting; } else if (mode & collection_non_blocking) { reason = reason_induced_noforce; } #ifdef STRESS_HEAP else if (mode & collection_gcstress) { reason = reason_gcstress; } #endif } return GarbageCollectGeneration (gen, reason); } #ifdef BACKGROUND_GC void gc_heap::add_bgc_pause_duration_0() { if (settings.concurrent) { uint64_t suspended_end_ts = GetHighPrecisionTimeStamp(); size_t pause_duration = (size_t)(suspended_end_ts - suspended_start_time); last_recorded_gc_info* last_gc_info = &(last_bgc_info[last_bgc_info_index]); last_gc_info->pause_durations[0] = pause_duration; if (last_gc_info->index < last_ephemeral_gc_info.index) { last_gc_info->pause_durations[0] -= last_ephemeral_gc_info.pause_durations[0]; } total_suspended_time += last_gc_info->pause_durations[0]; } } last_recorded_gc_info* gc_heap::get_completed_bgc_info() { int completed_bgc_index = gc_heap::background_running_p() ? 
(int)(!(gc_heap::last_bgc_info_index)) : (int)gc_heap::last_bgc_info_index; return &gc_heap::last_bgc_info[completed_bgc_index]; } #endif //BACKGROUND_GC void gc_heap::do_pre_gc() { STRESS_LOG_GC_STACK; #ifdef STRESS_LOG STRESS_LOG_GC_START(VolatileLoad(&settings.gc_index), (uint32_t)settings.condemned_generation, (uint32_t)settings.reason); #endif // STRESS_LOG #ifdef MULTIPLE_HEAPS gc_heap* hp = g_heaps[0]; #else gc_heap* hp = 0; #endif //MULTIPLE_HEAPS #ifdef BACKGROUND_GC settings.b_state = hp->current_bgc_state; if (settings.concurrent) { last_bgc_info_index = !last_bgc_info_index; last_bgc_info[last_bgc_info_index].index = settings.gc_index; } #endif //BACKGROUND_GC #ifdef TRACE_GC size_t total_allocated_since_last_gc = get_total_allocated_since_last_gc(); #ifdef BACKGROUND_GC dprintf (1, (ThreadStressLog::gcDetailedStartMsg(), VolatileLoad(&settings.gc_index), dd_collection_count (hp->dynamic_data_of (0)), settings.condemned_generation, total_allocated_since_last_gc, (settings.concurrent ? "BGC" : (gc_heap::background_running_p() ? "FGC" : "NGC")), settings.b_state)); #else dprintf (1, ("*GC* %d(gen0:%d)(%d)(alloc: %Id)", VolatileLoad(&settings.gc_index), dd_collection_count(hp->dynamic_data_of(0)), settings.condemned_generation, total_allocated_since_last_gc)); #endif //BACKGROUND_GC if (heap_hard_limit) { size_t total_heap_committed = get_total_committed_size(); size_t total_heap_committed_recorded = current_total_committed - current_total_committed_bookkeeping; dprintf (1, ("(%d)GC commit BEG #%Id: %Id (recorded: %Id = %Id-%Id)", settings.condemned_generation, (size_t)settings.gc_index, total_heap_committed, total_heap_committed_recorded, current_total_committed, current_total_committed_bookkeeping)); } #endif //TRACE_GC GCHeap::UpdatePreGCCounters(); #if defined(__linux__) GCToEEInterface::UpdateGCEventStatus(static_cast<int>(GCEventStatus::GetEnabledLevel(GCEventProvider_Default)), static_cast<int>(GCEventStatus::GetEnabledKeywords(GCEventProvider_Default)), static_cast<int>(GCEventStatus::GetEnabledLevel(GCEventProvider_Private)), static_cast<int>(GCEventStatus::GetEnabledKeywords(GCEventProvider_Private))); #endif // __linux__ if (settings.concurrent) { #ifdef BACKGROUND_GC full_gc_counts[gc_type_background]++; #endif // BACKGROUND_GC } else { if (settings.condemned_generation == max_generation) { full_gc_counts[gc_type_blocking]++; } else { #ifdef BACKGROUND_GC if (settings.background_p) { ephemeral_fgc_counts[settings.condemned_generation]++; } #endif //BACKGROUND_GC } } } #ifdef GC_CONFIG_DRIVEN void gc_heap::record_interesting_info_per_heap() { // datapoints are always from the last blocking GC so don't record again // for BGCs. 
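    // (Editorial note: for blocking GCs the per-GC datapoints gathered in
    //  interesting_data_per_gc are folded into the per-heap running totals below;
    //  a BGC would still be carrying the datapoints of the previous blocking GC,
    //  hence the skip described above.)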
if (!(settings.concurrent)) { for (int i = 0; i < max_idp_count; i++) { interesting_data_per_heap[i] += interesting_data_per_gc[i]; } } int compact_reason = get_gc_data_per_heap()->get_mechanism (gc_heap_compact); if (compact_reason >= 0) (compact_reasons_per_heap[compact_reason])++; int expand_mechanism = get_gc_data_per_heap()->get_mechanism (gc_heap_expand); if (expand_mechanism >= 0) (expand_mechanisms_per_heap[expand_mechanism])++; for (int i = 0; i < max_gc_mechanism_bits_count; i++) { if (get_gc_data_per_heap()->is_mechanism_bit_set ((gc_mechanism_bit_per_heap)i)) (interesting_mechanism_bits_per_heap[i])++; } // h# | GC | gen | C | EX | NF | BF | ML | DM || PreS | PostS | Merge | Conv | Pre | Post | PrPo | PreP | PostP | cprintf (("%2d | %6d | %1d | %1s | %2s | %2s | %2s | %2s | %2s || %5Id | %5Id | %5Id | %5Id | %5Id | %5Id | %5Id | %5Id | %5Id |", heap_number, (size_t)settings.gc_index, settings.condemned_generation, // TEMP - I am just doing this for wks GC 'cause I wanna see the pattern of doing C/S GCs. (settings.compaction ? (((compact_reason >= 0) && gc_heap_compact_reason_mandatory_p[compact_reason]) ? "M" : "W") : ""), // compaction ((expand_mechanism >= 0)? "X" : ""), // EX ((expand_mechanism == expand_reuse_normal) ? "X" : ""), // NF ((expand_mechanism == expand_reuse_bestfit) ? "X" : ""), // BF (get_gc_data_per_heap()->is_mechanism_bit_set (gc_mark_list_bit) ? "X" : ""), // ML (get_gc_data_per_heap()->is_mechanism_bit_set (gc_demotion_bit) ? "X" : ""), // DM interesting_data_per_gc[idp_pre_short], interesting_data_per_gc[idp_post_short], interesting_data_per_gc[idp_merged_pin], interesting_data_per_gc[idp_converted_pin], interesting_data_per_gc[idp_pre_pin], interesting_data_per_gc[idp_post_pin], interesting_data_per_gc[idp_pre_and_post_pin], interesting_data_per_gc[idp_pre_short_padded], interesting_data_per_gc[idp_post_short_padded])); } void gc_heap::record_global_mechanisms() { for (int i = 0; i < max_global_mechanisms_count; i++) { if (gc_data_global.get_mechanism_p ((gc_global_mechanism_p)i)) { ::record_global_mechanism (i); } } } BOOL gc_heap::should_do_sweeping_gc (BOOL compact_p) { if (!compact_ratio) return (!compact_p); size_t compact_count = compact_or_sweep_gcs[0]; size_t sweep_count = compact_or_sweep_gcs[1]; size_t total_count = compact_count + sweep_count; BOOL should_compact = compact_p; if (total_count > 3) { if (compact_p) { int temp_ratio = (int)((compact_count + 1) * 100 / (total_count + 1)); if (temp_ratio > compact_ratio) { // cprintf (("compact would be: %d, total_count: %d, ratio would be %d%% > target\n", // (compact_count + 1), (total_count + 1), temp_ratio)); should_compact = FALSE; } } else { int temp_ratio = (int)((sweep_count + 1) * 100 / (total_count + 1)); if (temp_ratio > (100 - compact_ratio)) { // cprintf (("sweep would be: %d, total_count: %d, ratio would be %d%% > target\n", // (sweep_count + 1), (total_count + 1), temp_ratio)); should_compact = TRUE; } } } return !should_compact; } #endif //GC_CONFIG_DRIVEN #ifdef BGC_SERVO_TUNING // virtual_fl_size is only used for NGC2 void gc_heap::check_and_adjust_bgc_tuning (int gen_number, size_t physical_size, ptrdiff_t virtual_fl_size) { // For LOH we need to check more often to catch things like when the size grows too much. int min_gen_to_check = ((gen_number == max_generation) ? 
(max_generation - 1) : 0); if (settings.condemned_generation >= min_gen_to_check) { #ifdef MULTIPLE_HEAPS gc_heap* hp = g_heaps[0]; #else gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS size_t total_gen_size = physical_size; size_t total_generation_fl_size = get_total_generation_fl_size (gen_number); double gen_flr = (double)total_generation_fl_size * 100.0 / (double)total_gen_size; size_t gen1_index = dd_collection_count (hp->dynamic_data_of (max_generation - 1)); size_t gen2_index = dd_collection_count (hp->dynamic_data_of (max_generation)); bgc_tuning::tuning_calculation* current_gen_calc = &bgc_tuning::gen_calc[gen_number - max_generation]; bgc_tuning::tuning_stats* current_gen_stats = &bgc_tuning::gen_stats[gen_number - max_generation]; bool gen_size_inc_p = (total_gen_size > current_gen_calc->last_bgc_size); if ((settings.condemned_generation >= min_gen_to_check) && (settings.condemned_generation != max_generation)) { if (gen_size_inc_p) { current_gen_stats->last_gen_increase_flr = gen_flr; dprintf (BGC_TUNING_LOG, ("BTLp[g1: %Id, g2: %Id]: gen%d size inc %s %Id->%Id, flr: %.3f", gen1_index, gen2_index, gen_number, (gc_heap::background_running_p() ? "during bgc" : ""), current_gen_stats->last_bgc_physical_size, total_gen_size, gen_flr)); } if (!bgc_tuning::fl_tuning_triggered) { if (bgc_tuning::enable_fl_tuning) { if (!((gc_heap::background_running_p() || (hp->current_bgc_state == bgc_initialized)))) { assert (settings.entry_memory_load); // We start when we are 2/3 way there so we don't overshoot. if ((settings.entry_memory_load >= (bgc_tuning::memory_load_goal * 2 / 3)) && (full_gc_counts[gc_type_background] >= 2)) { bgc_tuning::next_bgc_p = true; current_gen_calc->first_alloc_to_trigger = get_total_servo_alloc (gen_number); dprintf (BGC_TUNING_LOG, ("BTL[g1: %Id] mem high enough: %d(goal: %d), gen%d fl alloc: %Id, trigger BGC!", gen1_index, settings.entry_memory_load, bgc_tuning::memory_load_goal, gen_number, current_gen_calc->first_alloc_to_trigger)); } } } } } if ((settings.condemned_generation == max_generation) && !(settings.concurrent)) { size_t total_survived = get_total_surv_size (gen_number); size_t total_begin = get_total_begin_data_size (gen_number); double current_gc_surv_rate = (double)total_survived * 100.0 / (double)total_begin; // calculate the adjusted gen_flr. 
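            // (Editorial note: i.e. the free-list ratio is recomputed as if the
            //  virtual FL were part of the generation:
            //    new_gen_flr = (total_generation_fl_size + virtual_fl_size) * 100
            //                  / (physical_size + virtual_fl_size) )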
double total_virtual_size = (double)physical_size + (double)virtual_fl_size; double total_fl_size = (double)total_generation_fl_size + (double)virtual_fl_size; double new_gen_flr = total_fl_size * 100.0 / total_virtual_size; dprintf (BGC_TUNING_LOG, ("BTL%d NGC2 size %Id->%Id, fl %Id(%.3f)->%Id(%.3f)", gen_number, physical_size, (size_t)total_virtual_size, total_generation_fl_size, gen_flr, (size_t)total_fl_size, new_gen_flr)); dprintf (BGC_TUNING_LOG, ("BTL%d* %Id, %.3f, %.3f, %.3f, %.3f, %.3f, %Id, %Id, %Id, %Id", gen_number, (size_t)total_virtual_size, 0.0, 0.0, new_gen_flr, current_gen_stats->last_gen_increase_flr, current_gc_surv_rate, 0, 0, 0, current_gen_calc->alloc_to_trigger)); bgc_tuning::gen1_index_last_bgc_end = gen1_index; current_gen_calc->last_bgc_size = total_gen_size; current_gen_calc->last_bgc_flr = new_gen_flr; current_gen_calc->last_sweep_above_p = false; current_gen_calc->last_bgc_end_alloc = 0; current_gen_stats->last_alloc_end_to_start = 0; current_gen_stats->last_alloc_start_to_sweep = 0; current_gen_stats->last_alloc_sweep_to_end = 0; current_gen_stats->last_bgc_fl_size = total_generation_fl_size; current_gen_stats->last_bgc_surv_rate = current_gc_surv_rate; current_gen_stats->last_gen_increase_flr = 0; } } } void gc_heap::get_and_reset_loh_alloc_info() { if (!bgc_tuning::enable_fl_tuning) return; total_loh_a_last_bgc = 0; uint64_t total_loh_a_no_bgc = 0; uint64_t total_loh_a_bgc_marking = 0; uint64_t total_loh_a_bgc_planning = 0; #ifdef MULTIPLE_HEAPS for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; #else //MULTIPLE_HEAPS { gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS total_loh_a_no_bgc += hp->loh_a_no_bgc; hp->loh_a_no_bgc = 0; total_loh_a_bgc_marking += hp->loh_a_bgc_marking; hp->loh_a_bgc_marking = 0; total_loh_a_bgc_planning += hp->loh_a_bgc_planning; hp->loh_a_bgc_planning = 0; } dprintf (2, ("LOH alloc: outside bgc: %I64d; bm: %I64d; bp: %I64d", total_loh_a_no_bgc, total_loh_a_bgc_marking, total_loh_a_bgc_planning)); total_loh_a_last_bgc = total_loh_a_no_bgc + total_loh_a_bgc_marking + total_loh_a_bgc_planning; } #endif //BGC_SERVO_TUNING bool gc_heap::is_pm_ratio_exceeded() { size_t maxgen_frag = 0; size_t maxgen_size = 0; size_t total_heap_size = get_total_heap_size(); #ifdef MULTIPLE_HEAPS for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; #else //MULTIPLE_HEAPS { gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS maxgen_frag += dd_fragmentation (hp->dynamic_data_of (max_generation)); maxgen_size += hp->generation_size (max_generation); } double maxgen_ratio = (double)maxgen_size / (double)total_heap_size; double maxgen_frag_ratio = (double)maxgen_frag / (double)maxgen_size; dprintf (GTC_LOG, ("maxgen %Id(%d%% total heap), frag: %Id (%d%% maxgen)", maxgen_size, (int)(maxgen_ratio * 100.0), maxgen_frag, (int)(maxgen_frag_ratio * 100.0))); bool maxgen_highfrag_p = ((maxgen_ratio > 0.5) && (maxgen_frag_ratio > 0.1)); // We need to adjust elevation here because if there's enough fragmentation it's not // unproductive. 
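    // (Editorial note: "high fragmentation" here means gen2 occupies more than half
    //  of the total heap and more than 10% of gen2 is fragmentation; when that is
    //  the case, should_lock_elevation is cleared below - see the dprintf - since
    //  collecting gen2 would not be unproductive with that much fragmentation.)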
if (maxgen_highfrag_p) { settings.should_lock_elevation = FALSE; dprintf (GTC_LOG, ("high frag gen2, turn off elevation")); } return maxgen_highfrag_p; } void gc_heap::update_recorded_gen_data (last_recorded_gc_info* gc_info) { memset (gc_info->gen_info, 0, sizeof (gc_info->gen_info)); #ifdef MULTIPLE_HEAPS for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; #else //MULTIPLE_HEAPS { gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS gc_history_per_heap* current_gc_data_per_heap = hp->get_gc_data_per_heap(); for (int gen_number = 0; gen_number < total_generation_count; gen_number++) { recorded_generation_info* recorded_info = &(gc_info->gen_info[gen_number]); gc_generation_data* data = &(current_gc_data_per_heap->gen_data[gen_number]); recorded_info->size_before += data->size_before; recorded_info->fragmentation_before += data->free_list_space_before + data->free_obj_space_before; recorded_info->size_after += data->size_after; recorded_info->fragmentation_after += data->free_list_space_after + data->free_obj_space_after; } } } void gc_heap::do_post_gc() { #ifdef MULTIPLE_HEAPS gc_heap* hp = g_heaps[0]; #else gc_heap* hp = 0; #endif //MULTIPLE_HEAPS GCToEEInterface::GcDone(settings.condemned_generation); GCToEEInterface::DiagGCEnd(VolatileLoad(&settings.gc_index), (uint32_t)settings.condemned_generation, (uint32_t)settings.reason, !!settings.concurrent); add_to_history(); uint32_t current_memory_load = 0; #ifdef BGC_SERVO_TUNING if (bgc_tuning::enable_fl_tuning) { uint64_t current_available_physical = 0; size_t gen2_physical_size = 0; size_t gen3_physical_size = 0; ptrdiff_t gen2_virtual_fl_size = 0; ptrdiff_t gen3_virtual_fl_size = 0; ptrdiff_t vfl_from_kp = 0; ptrdiff_t vfl_from_ki = 0; gen2_physical_size = get_total_generation_size (max_generation); gen3_physical_size = get_total_generation_size (loh_generation); get_memory_info (&current_memory_load, &current_available_physical); if ((settings.condemned_generation == max_generation) && !settings.concurrent) { double gen2_size_ratio = (double)gen2_physical_size / ((double)gen2_physical_size + (double)gen3_physical_size); double total_virtual_fl_size = bgc_tuning::calculate_ml_tuning (current_available_physical, true, &vfl_from_kp, &vfl_from_ki); gen2_virtual_fl_size = (ptrdiff_t)(total_virtual_fl_size * gen2_size_ratio); gen3_virtual_fl_size = (ptrdiff_t)(total_virtual_fl_size * (1.0 - gen2_size_ratio)); #ifdef SIMPLE_DPRINTF dprintf (BGC_TUNING_LOG, ("BTL: ml: %d (g: %d)(%s), a: %I64d (g: %I64d, elg: %Id+%Id=%Id, %Id+%Id=%Id), vfl: %Id=%Id+%Id(NGC2)", current_memory_load, bgc_tuning::memory_load_goal, ((current_available_physical > bgc_tuning::available_memory_goal) ? "above" : "below"), current_available_physical, bgc_tuning::available_memory_goal, gen2_physical_size, gen2_virtual_fl_size, (gen2_physical_size + gen2_virtual_fl_size), gen3_physical_size, gen3_virtual_fl_size, (gen3_physical_size + gen3_virtual_fl_size), (ptrdiff_t)total_virtual_fl_size, vfl_from_kp, vfl_from_ki)); #endif //SIMPLE_DPRINTF } check_and_adjust_bgc_tuning (max_generation, gen2_physical_size, gen2_virtual_fl_size); check_and_adjust_bgc_tuning (loh_generation, gen3_physical_size, gen3_virtual_fl_size); } #endif //BGC_SERVO_TUNING dprintf (1, (ThreadStressLog::gcDetailedEndMsg(), VolatileLoad(&settings.gc_index), dd_collection_count(hp->dynamic_data_of(0)), (size_t)(GetHighPrecisionTimeStamp() / 1000), settings.condemned_generation, (settings.concurrent ? "BGC" : (gc_heap::background_running_p() ? 
"FGC" : "NGC")), (settings.compaction ? "C" : "S"), (settings.promotion ? "P" : "S"), settings.entry_memory_load, current_memory_load)); // Now record the gc info. last_recorded_gc_info* last_gc_info = 0; #ifdef BACKGROUND_GC if (settings.concurrent) { last_gc_info = &last_bgc_info[last_bgc_info_index]; assert (last_gc_info->index == settings.gc_index); } else #endif //BACKGROUND_GC { last_gc_info = ((settings.condemned_generation == max_generation) ? &last_full_blocking_gc_info : &last_ephemeral_gc_info); last_gc_info->index = settings.gc_index; } size_t total_heap_committed = get_total_committed_size(); last_gc_info->total_committed = total_heap_committed; last_gc_info->promoted = get_total_promoted(); last_gc_info->pinned_objects = get_total_pinned_objects(); last_gc_info->finalize_promoted_objects = GCHeap::GetFinalizablePromotedCount(); if (!settings.concurrent) { // If it's a normal blocking GC with its own SuspendEE, we simply get the elapsed time recoreded // and add the time between SuspendEE start and GC start. dynamic_data* dd = hp->dynamic_data_of (settings.condemned_generation); uint64_t gc_start_ts = dd_time_clock (dd); size_t pause_duration = (size_t)(end_gc_time - dd_time_clock (dd)); #ifdef BACKGROUND_GC if ((hp->current_bgc_state != bgc_initialized) && (settings.reason != reason_pm_full_gc)) { pause_duration += (size_t)(gc_start_ts - suspended_start_time); } #endif //BACKGROUND_GC last_gc_info->pause_durations[0] = pause_duration; total_suspended_time += pause_duration; last_gc_info->pause_durations[1] = 0; } uint64_t total_process_time = end_gc_time - process_start_time; last_gc_info->pause_percentage = (float)(total_process_time ? ((double)total_suspended_time / (double)total_process_time * 100.0) : 0); update_recorded_gen_data (last_gc_info); last_gc_info->heap_size = get_total_heap_size(); last_gc_info->fragmentation = get_total_fragmentation(); if (settings.exit_memory_load != 0) last_gc_info->memory_load = settings.exit_memory_load; else if (settings.entry_memory_load != 0) last_gc_info->memory_load = settings.entry_memory_load; last_gc_info->condemned_generation = settings.condemned_generation; last_gc_info->compaction = settings.compaction; last_gc_info->concurrent = settings.concurrent; #ifdef BACKGROUND_GC is_last_recorded_bgc = settings.concurrent; #endif //BACKGROUND_GC #ifdef TRACE_GC if (heap_hard_limit) { size_t total_heap_committed_recorded = current_total_committed - current_total_committed_bookkeeping; dprintf (1, ("(%d)GC commit END #%Id: %Id (recorded: %Id=%Id-%Id), heap %Id, frag: %Id", settings.condemned_generation, (size_t)settings.gc_index, total_heap_committed, total_heap_committed_recorded, current_total_committed, current_total_committed_bookkeeping, last_gc_info->heap_size, last_gc_info->fragmentation)); } #endif //TRACE_GC // Note we only do this at the end of full blocking GCs because we do not want // to turn on this provisional mode during the middle of a BGC. 
if ((settings.condemned_generation == max_generation) && (!settings.concurrent)) { if (pm_stress_on) { size_t full_compacting_gc_count = full_gc_counts[gc_type_compacting]; if (provisional_mode_triggered) { uint64_t r = gc_rand::get_rand(10); if ((full_compacting_gc_count - provisional_triggered_gc_count) >= r) { provisional_mode_triggered = false; provisional_off_gc_count = full_compacting_gc_count; dprintf (GTC_LOG, ("%Id NGC2s when turned on, %Id NGCs since(%Id)", provisional_triggered_gc_count, (full_compacting_gc_count - provisional_triggered_gc_count), num_provisional_triggered)); } } else { uint64_t r = gc_rand::get_rand(5); if ((full_compacting_gc_count - provisional_off_gc_count) >= r) { provisional_mode_triggered = true; provisional_triggered_gc_count = full_compacting_gc_count; num_provisional_triggered++; dprintf (GTC_LOG, ("%Id NGC2s when turned off, %Id NGCs since(%Id)", provisional_off_gc_count, (full_compacting_gc_count - provisional_off_gc_count), num_provisional_triggered)); } } } else { if (provisional_mode_triggered) { if ((settings.entry_memory_load < high_memory_load_th) || !is_pm_ratio_exceeded()) { dprintf (GTC_LOG, ("turning off PM")); provisional_mode_triggered = false; } } else if ((settings.entry_memory_load >= high_memory_load_th) && is_pm_ratio_exceeded()) { dprintf (GTC_LOG, ("highmem && highfrag - turning on PM")); provisional_mode_triggered = true; num_provisional_triggered++; } } } GCHeap::UpdatePostGCCounters(); // We need to reinitialize the number of pinned objects because it's used in the GCHeapStats // event fired in GCHeap::UpdatePostGCCounters. For BGC, we will get that event following an // FGC's GCHeapStats and we wouldn't want that FGC's info to carry over to the BGC. reinit_pinned_objects(); #ifdef STRESS_LOG STRESS_LOG_GC_END(VolatileLoad(&settings.gc_index), (uint32_t)settings.condemned_generation, (uint32_t)settings.reason); #endif // STRESS_LOG #ifdef GC_CONFIG_DRIVEN if (!settings.concurrent) { if (settings.compaction) (compact_or_sweep_gcs[0])++; else (compact_or_sweep_gcs[1])++; } #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) g_heaps[i]->record_interesting_info_per_heap(); #else record_interesting_info_per_heap(); #endif //MULTIPLE_HEAPS record_global_mechanisms(); #endif //GC_CONFIG_DRIVEN if (mark_list_overflow) { grow_mark_list(); mark_list_overflow = false; } } unsigned GCHeap::GetGcCount() { return (unsigned int)VolatileLoad(&pGenGCHeap->settings.gc_index); } size_t GCHeap::GarbageCollectGeneration (unsigned int gen, gc_reason reason) { dprintf (2, ("triggered a GC!")); #ifdef MULTIPLE_HEAPS gc_heap* hpt = gc_heap::g_heaps[0]; #else gc_heap* hpt = 0; #endif //MULTIPLE_HEAPS bool cooperative_mode = true; dynamic_data* dd = hpt->dynamic_data_of (gen); size_t localCount = dd_collection_count (dd); enter_spin_lock (&gc_heap::gc_lock); dprintf (SPINLOCK_LOG, ("GC Egc")); ASSERT_HOLDING_SPIN_LOCK(&gc_heap::gc_lock); //don't trigger another GC if one was already in progress //while waiting for the lock { size_t col_count = dd_collection_count (dd); if (localCount != col_count) { #ifdef SYNCHRONIZATION_STATS gc_lock_contended++; #endif //SYNCHRONIZATION_STATS dprintf (SPINLOCK_LOG, ("no need GC Lgc")); leave_spin_lock (&gc_heap::gc_lock); // We don't need to release msl here 'cause this means a GC // has happened and would have release all msl's. 
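// (msl here is the more_space_lock taken on the allocation path.)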
return col_count; } } gc_heap::g_low_memory_status = (reason == reason_lowmemory) || (reason == reason_lowmemory_blocking) || (gc_heap::latency_level == latency_level_memory_footprint); gc_trigger_reason = reason; #ifdef MULTIPLE_HEAPS for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap::g_heaps[i]->reset_gc_done(); } #else gc_heap::reset_gc_done(); #endif //MULTIPLE_HEAPS gc_heap::gc_started = TRUE; { init_sync_log_stats(); #ifndef MULTIPLE_HEAPS cooperative_mode = gc_heap::enable_preemptive (); dprintf (2, ("Suspending EE")); gc_heap::suspended_start_time = GetHighPrecisionTimeStamp(); BEGIN_TIMING(suspend_ee_during_log); GCToEEInterface::SuspendEE(SUSPEND_FOR_GC); END_TIMING(suspend_ee_during_log); gc_heap::proceed_with_gc_p = gc_heap::should_proceed_with_gc(); gc_heap::disable_preemptive (cooperative_mode); if (gc_heap::proceed_with_gc_p) pGenGCHeap->settings.init_mechanisms(); else gc_heap::update_collection_counts_for_no_gc(); #endif //!MULTIPLE_HEAPS } unsigned int condemned_generation_number = gen; // We want to get a stack from the user thread that triggered the GC // instead of on the GC thread which is the case for Server GC. // But we are doing it for Workstation GC as well to be uniform. FIRE_EVENT(GCTriggered, static_cast<uint32_t>(reason)); #ifdef MULTIPLE_HEAPS GcCondemnedGeneration = condemned_generation_number; cooperative_mode = gc_heap::enable_preemptive (); BEGIN_TIMING(gc_during_log); gc_heap::ee_suspend_event.Set(); gc_heap::wait_for_gc_done(); END_TIMING(gc_during_log); gc_heap::disable_preemptive (cooperative_mode); condemned_generation_number = GcCondemnedGeneration; #else if (gc_heap::proceed_with_gc_p) { BEGIN_TIMING(gc_during_log); pGenGCHeap->garbage_collect (condemned_generation_number); if (gc_heap::pm_trigger_full_gc) { pGenGCHeap->garbage_collect_pm_full_gc(); } END_TIMING(gc_during_log); } #endif //MULTIPLE_HEAPS #ifdef BACKGROUND_GC // We are deciding whether we should fire the alloc wait end event here // because in begin_foreground we could be calling end_foreground // if we need to retry. if (gc_heap::alloc_wait_event_p) { hpt->fire_alloc_wait_event_end (awr_fgc_wait_for_bgc); gc_heap::alloc_wait_event_p = FALSE; } #endif //BACKGROUND_GC #ifndef MULTIPLE_HEAPS #ifdef BACKGROUND_GC if (!gc_heap::dont_restart_ee_p) #endif //BACKGROUND_GC { #ifdef BACKGROUND_GC gc_heap::add_bgc_pause_duration_0(); #endif //BACKGROUND_GC BEGIN_TIMING(restart_ee_during_log); GCToEEInterface::RestartEE(TRUE); END_TIMING(restart_ee_during_log); } #endif //!MULTIPLE_HEAPS #ifndef MULTIPLE_HEAPS process_sync_log_stats(); gc_heap::gc_started = FALSE; gc_heap::set_gc_done(); dprintf (SPINLOCK_LOG, ("GC Lgc")); leave_spin_lock (&gc_heap::gc_lock); #endif //!MULTIPLE_HEAPS #ifdef FEATURE_PREMORTEM_FINALIZATION GCToEEInterface::EnableFinalization(!pGenGCHeap->settings.concurrent && pGenGCHeap->settings.found_finalizers); #endif // FEATURE_PREMORTEM_FINALIZATION return dd_collection_count (dd); } size_t GCHeap::GetTotalBytesInUse () { #ifdef MULTIPLE_HEAPS //enumerate all the heaps and get their size. 
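// Note the per-heap values are not captured under one lock held across all heaps, so the sum is only an
// approximation while allocators are running.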
size_t tot_size = 0; for (int i = 0; i < gc_heap::n_heaps; i++) { GCHeap* Hp = gc_heap::g_heaps [i]->vm_heap; tot_size += Hp->ApproxTotalBytesInUse(); } return tot_size; #else return ApproxTotalBytesInUse(); #endif //MULTIPLE_HEAPS } // Get the total allocated bytes uint64_t GCHeap::GetTotalAllocatedBytes() { #ifdef MULTIPLE_HEAPS uint64_t total_alloc_bytes = 0; for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; total_alloc_bytes += hp->total_alloc_bytes_soh; total_alloc_bytes += hp->total_alloc_bytes_uoh; } return total_alloc_bytes; #else return (pGenGCHeap->total_alloc_bytes_soh + pGenGCHeap->total_alloc_bytes_uoh); #endif //MULTIPLE_HEAPS } int GCHeap::CollectionCount (int generation, int get_bgc_fgc_count) { if (get_bgc_fgc_count != 0) { #ifdef BACKGROUND_GC if (generation == max_generation) { return (int)(gc_heap::full_gc_counts[gc_type_background]); } else { return (int)(gc_heap::ephemeral_fgc_counts[generation]); } #else return 0; #endif //BACKGROUND_GC } #ifdef MULTIPLE_HEAPS gc_heap* hp = gc_heap::g_heaps [0]; #else //MULTIPLE_HEAPS gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS if (generation > max_generation) return 0; else return (int)dd_collection_count (hp->dynamic_data_of (generation)); } size_t GCHeap::ApproxTotalBytesInUse(BOOL small_heap_only) { size_t totsize = 0; enter_spin_lock (&pGenGCHeap->gc_lock); // For gen0 it's a bit complicated because we are currently allocating in it. We get the fragmentation first // just so that we don't give a negative number for the resulting size. generation* gen = pGenGCHeap->generation_of (0); size_t gen0_frag = generation_free_list_space (gen) + generation_free_obj_space (gen); uint8_t* current_alloc_allocated = pGenGCHeap->alloc_allocated; heap_segment* current_eph_seg = pGenGCHeap->ephemeral_heap_segment; size_t gen0_size = 0; #ifdef USE_REGIONS heap_segment* gen0_seg = generation_start_segment (gen); while (gen0_seg) { uint8_t* end = in_range_for_segment (current_alloc_allocated, gen0_seg) ? current_alloc_allocated : heap_segment_allocated (gen0_seg); gen0_size += end - heap_segment_mem (gen0_seg); if (gen0_seg == current_eph_seg) { break; } gen0_seg = heap_segment_next (gen0_seg); } #else //USE_REGIONS // For segments ephemeral seg does not change. gen0_size = current_alloc_allocated - heap_segment_mem (current_eph_seg); #endif //USE_REGIONS totsize = gen0_size - gen0_frag; int stop_gen_index = max_generation; if (gc_heap::current_c_gc_state == c_gc_state_planning) { // During BGC sweep since we can be deleting SOH segments, we avoid walking the segment // list. 
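// Instead we use the SOH size recorded at the end of background mark (background_soh_size_end_mark), subtract the
// oldest generation's free space, and skip gen2 in the per-generation loop below by decrementing stop_gen_index.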
generation* oldest_gen = pGenGCHeap->generation_of (max_generation); totsize = pGenGCHeap->background_soh_size_end_mark - generation_free_list_space (oldest_gen) - generation_free_obj_space (oldest_gen); stop_gen_index--; } for (int i = (max_generation - 1); i <= stop_gen_index; i++) { generation* gen = pGenGCHeap->generation_of (i); totsize += pGenGCHeap->generation_size (i) - generation_free_list_space (gen) - generation_free_obj_space (gen); } if (!small_heap_only) { for (int i = uoh_start_generation; i < total_generation_count; i++) { generation* gen = pGenGCHeap->generation_of (i); totsize += pGenGCHeap->generation_size (i) - generation_free_list_space (gen) - generation_free_obj_space (gen); } } leave_spin_lock (&pGenGCHeap->gc_lock); return totsize; } #ifdef MULTIPLE_HEAPS void GCHeap::AssignHeap (alloc_context* acontext) { // Assign heap based on processor acontext->set_alloc_heap(GetHeap(heap_select::select_heap(acontext))); acontext->set_home_heap(acontext->get_alloc_heap()); } GCHeap* GCHeap::GetHeap (int n) { assert (n < gc_heap::n_heaps); return gc_heap::g_heaps[n]->vm_heap; } #endif //MULTIPLE_HEAPS bool GCHeap::IsThreadUsingAllocationContextHeap(gc_alloc_context* context, int thread_number) { alloc_context* acontext = static_cast<alloc_context*>(context); #ifdef MULTIPLE_HEAPS return ((acontext->get_home_heap() == GetHeap(thread_number)) || ((acontext->get_home_heap() == 0) && (thread_number == 0))); #else UNREFERENCED_PARAMETER(acontext); UNREFERENCED_PARAMETER(thread_number); return true; #endif //MULTIPLE_HEAPS } // Returns the number of processors required to trigger the use of thread based allocation contexts int GCHeap::GetNumberOfHeaps () { #ifdef MULTIPLE_HEAPS return gc_heap::n_heaps; #else return 1; #endif //MULTIPLE_HEAPS } /* in this way we spend extra time cycling through all the heaps while create the handle it ought to be changed by keeping alloc_context.home_heap as number (equals heap_number) */ int GCHeap::GetHomeHeapNumber () { #ifdef MULTIPLE_HEAPS gc_alloc_context* ctx = GCToEEInterface::GetAllocContext(); if (!ctx) { return 0; } GCHeap *hp = static_cast<alloc_context*>(ctx)->get_home_heap(); return (hp ? hp->pGenGCHeap->heap_number : 0); #else return 0; #endif //MULTIPLE_HEAPS } unsigned int GCHeap::GetCondemnedGeneration() { return gc_heap::settings.condemned_generation; } void GCHeap::GetMemoryInfo(uint64_t* highMemLoadThresholdBytes, uint64_t* totalAvailableMemoryBytes, uint64_t* lastRecordedMemLoadBytes, uint64_t* lastRecordedHeapSizeBytes, uint64_t* lastRecordedFragmentationBytes, uint64_t* totalCommittedBytes, uint64_t* promotedBytes, uint64_t* pinnedObjectCount, uint64_t* finalizationPendingCount, uint64_t* index, uint32_t* generation, uint32_t* pauseTimePct, bool* isCompaction, bool* isConcurrent, uint64_t* genInfoRaw, uint64_t* pauseInfoRaw, int kind) { last_recorded_gc_info* last_gc_info = 0; if ((gc_kind)kind == gc_kind_ephemeral) { last_gc_info = &gc_heap::last_ephemeral_gc_info; } else if ((gc_kind)kind == gc_kind_full_blocking) { last_gc_info = &gc_heap::last_full_blocking_gc_info; } #ifdef BACKGROUND_GC else if ((gc_kind)kind == gc_kind_background) { last_gc_info = gc_heap::get_completed_bgc_info(); } #endif //BACKGROUND_GC else { assert ((gc_kind)kind == gc_kind_any); #ifdef BACKGROUND_GC if (gc_heap::is_last_recorded_bgc) { last_gc_info = gc_heap::get_completed_bgc_info(); } else #endif //BACKGROUND_GC { last_gc_info = ((gc_heap::last_ephemeral_gc_info.index > gc_heap::last_full_blocking_gc_info.index) ? 
&gc_heap::last_ephemeral_gc_info : &gc_heap::last_full_blocking_gc_info); } } *highMemLoadThresholdBytes = (uint64_t) (((double)(gc_heap::high_memory_load_th)) / 100 * gc_heap::total_physical_mem); *totalAvailableMemoryBytes = gc_heap::heap_hard_limit != 0 ? gc_heap::heap_hard_limit : gc_heap::total_physical_mem; *lastRecordedMemLoadBytes = (uint64_t) (((double)(last_gc_info->memory_load)) / 100 * gc_heap::total_physical_mem); *lastRecordedHeapSizeBytes = last_gc_info->heap_size; *lastRecordedFragmentationBytes = last_gc_info->fragmentation; *totalCommittedBytes = last_gc_info->total_committed; *promotedBytes = last_gc_info->promoted; *pinnedObjectCount = last_gc_info->pinned_objects; *finalizationPendingCount = last_gc_info->finalize_promoted_objects; *index = last_gc_info->index; *generation = last_gc_info->condemned_generation; *pauseTimePct = (int)(last_gc_info->pause_percentage * 100); *isCompaction = last_gc_info->compaction; *isConcurrent = last_gc_info->concurrent; int genInfoIndex = 0; for (int i = 0; i < total_generation_count; i++) { genInfoRaw[genInfoIndex++] = last_gc_info->gen_info[i].size_before; genInfoRaw[genInfoIndex++] = last_gc_info->gen_info[i].fragmentation_before; genInfoRaw[genInfoIndex++] = last_gc_info->gen_info[i].size_after; genInfoRaw[genInfoIndex++] = last_gc_info->gen_info[i].fragmentation_after; } for (int i = 0; i < 2; i++) { // convert it to 100-ns units that TimeSpan needs. pauseInfoRaw[i] = (uint64_t)(last_gc_info->pause_durations[i]) * 10; } #ifdef _DEBUG if ((gc_kind)kind == gc_kind_ephemeral) { assert (last_gc_info->condemned_generation < max_generation); } else if ((gc_kind)kind == gc_kind_full_blocking) { assert (last_gc_info->condemned_generation == max_generation); assert (last_gc_info->concurrent == false); } #ifdef BACKGROUND_GC else if ((gc_kind)kind == gc_kind_background) { assert (last_gc_info->condemned_generation == max_generation); assert (last_gc_info->concurrent == true); } #endif //BACKGROUND_GC #endif //_DEBUG } uint32_t GCHeap::GetMemoryLoad() { uint32_t memory_load = 0; if (gc_heap::settings.exit_memory_load != 0) memory_load = gc_heap::settings.exit_memory_load; else if (gc_heap::settings.entry_memory_load != 0) memory_load = gc_heap::settings.entry_memory_load; return memory_load; } int GCHeap::GetGcLatencyMode() { return (int)(pGenGCHeap->settings.pause_mode); } int GCHeap::SetGcLatencyMode (int newLatencyMode) { if (gc_heap::settings.pause_mode == pause_no_gc) return (int)set_pause_mode_no_gc; gc_pause_mode new_mode = (gc_pause_mode)newLatencyMode; if (new_mode == pause_low_latency) { #ifndef MULTIPLE_HEAPS pGenGCHeap->settings.pause_mode = new_mode; #endif //!MULTIPLE_HEAPS } else if (new_mode == pause_sustained_low_latency) { #ifdef BACKGROUND_GC if (gc_heap::gc_can_use_concurrent) { pGenGCHeap->settings.pause_mode = new_mode; } #endif //BACKGROUND_GC } else { pGenGCHeap->settings.pause_mode = new_mode; } #ifdef BACKGROUND_GC if (gc_heap::background_running_p()) { // If we get here, it means we are doing an FGC. If the pause // mode was altered we will need to save it in the BGC settings. 
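// (saved_bgc_settings is what gets restored when the background GC finishes, so updating it here keeps the new pause mode from being lost.)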
if (gc_heap::saved_bgc_settings.pause_mode != new_mode) { gc_heap::saved_bgc_settings.pause_mode = new_mode; } } #endif //BACKGROUND_GC return (int)set_pause_mode_success; } int GCHeap::GetLOHCompactionMode() { #ifdef FEATURE_LOH_COMPACTION return pGenGCHeap->loh_compaction_mode; #else return loh_compaction_default; #endif //FEATURE_LOH_COMPACTION } void GCHeap::SetLOHCompactionMode (int newLOHCompactionMode) { #ifdef FEATURE_LOH_COMPACTION pGenGCHeap->loh_compaction_mode = (gc_loh_compaction_mode)newLOHCompactionMode; #endif //FEATURE_LOH_COMPACTION } bool GCHeap::RegisterForFullGCNotification(uint32_t gen2Percentage, uint32_t lohPercentage) { #ifdef MULTIPLE_HEAPS for (int hn = 0; hn < gc_heap::n_heaps; hn++) { gc_heap* hp = gc_heap::g_heaps [hn]; hp->fgn_last_alloc = dd_new_allocation (hp->dynamic_data_of (0)); hp->fgn_maxgen_percent = gen2Percentage; } #else //MULTIPLE_HEAPS pGenGCHeap->fgn_last_alloc = dd_new_allocation (pGenGCHeap->dynamic_data_of (0)); pGenGCHeap->fgn_maxgen_percent = gen2Percentage; #endif //MULTIPLE_HEAPS pGenGCHeap->full_gc_approach_event.Reset(); pGenGCHeap->full_gc_end_event.Reset(); pGenGCHeap->full_gc_approach_event_set = false; pGenGCHeap->fgn_loh_percent = lohPercentage; return TRUE; } bool GCHeap::CancelFullGCNotification() { #ifdef MULTIPLE_HEAPS for (int hn = 0; hn < gc_heap::n_heaps; hn++) { gc_heap* hp = gc_heap::g_heaps [hn]; hp->fgn_maxgen_percent = 0; } #else //MULTIPLE_HEAPS pGenGCHeap->fgn_maxgen_percent = 0; #endif //MULTIPLE_HEAPS pGenGCHeap->fgn_loh_percent = 0; pGenGCHeap->full_gc_approach_event.Set(); pGenGCHeap->full_gc_end_event.Set(); return TRUE; } int GCHeap::WaitForFullGCApproach(int millisecondsTimeout) { dprintf (2, ("WFGA: Begin wait")); int result = gc_heap::full_gc_wait (&(pGenGCHeap->full_gc_approach_event), millisecondsTimeout); dprintf (2, ("WFGA: End wait")); return result; } int GCHeap::WaitForFullGCComplete(int millisecondsTimeout) { dprintf (2, ("WFGE: Begin wait")); int result = gc_heap::full_gc_wait (&(pGenGCHeap->full_gc_end_event), millisecondsTimeout); dprintf (2, ("WFGE: End wait")); return result; } int GCHeap::StartNoGCRegion(uint64_t totalSize, bool lohSizeKnown, uint64_t lohSize, bool disallowFullBlockingGC) { NoGCRegionLockHolder lh; dprintf (1, ("begin no gc called")); start_no_gc_region_status status = gc_heap::prepare_for_no_gc_region (totalSize, lohSizeKnown, lohSize, disallowFullBlockingGC); if (status == start_no_gc_success) { GarbageCollect (max_generation); status = gc_heap::get_start_no_gc_region_status(); } if (status != start_no_gc_success) gc_heap::handle_failure_for_no_gc(); return (int)status; } int GCHeap::EndNoGCRegion() { NoGCRegionLockHolder lh; return (int)gc_heap::end_no_gc_region(); } void GCHeap::PublishObject (uint8_t* Obj) { #ifdef BACKGROUND_GC gc_heap* hp = gc_heap::heap_of (Obj); hp->bgc_alloc_lock->uoh_alloc_done (Obj); hp->bgc_untrack_uoh_alloc(); #endif //BACKGROUND_GC } // The spec for this one isn't clear. This function // returns the size that can be allocated without // triggering a GC of any kind. 
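// In practice it reports the space left in gen0's current allocation quantum (allocation limit minus allocation
// pointer), read under the GC lock.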
size_t GCHeap::ApproxFreeBytes() { enter_spin_lock (&pGenGCHeap->gc_lock); generation* gen = pGenGCHeap->generation_of (0); size_t res = generation_allocation_limit (gen) - generation_allocation_pointer (gen); leave_spin_lock (&pGenGCHeap->gc_lock); return res; } HRESULT GCHeap::GetGcCounters(int gen, gc_counters* counters) { if ((gen < 0) || (gen > max_generation)) return E_FAIL; #ifdef MULTIPLE_HEAPS counters->current_size = 0; counters->promoted_size = 0; counters->collection_count = 0; //enumerate all the heaps and get their counters. for (int i = 0; i < gc_heap::n_heaps; i++) { dynamic_data* dd = gc_heap::g_heaps [i]->dynamic_data_of (gen); counters->current_size += dd_current_size (dd); counters->promoted_size += dd_promoted_size (dd); if (i == 0) counters->collection_count += dd_collection_count (dd); } #else dynamic_data* dd = pGenGCHeap->dynamic_data_of (gen); counters->current_size = dd_current_size (dd); counters->promoted_size = dd_promoted_size (dd); counters->collection_count = dd_collection_count (dd); #endif //MULTIPLE_HEAPS return S_OK; } // Get the segment size to use, making sure it conforms. size_t GCHeap::GetValidSegmentSize(bool large_seg) { #ifdef USE_REGIONS return (large_seg ? global_region_allocator.get_large_region_alignment() : global_region_allocator.get_region_alignment()); #else return (large_seg ? gc_heap::min_uoh_segment_size : gc_heap::soh_segment_size); #endif //USE_REGIONS } size_t gc_heap::get_gen0_min_size() { size_t gen0size = static_cast<size_t>(GCConfig::GetGen0Size()); bool is_config_invalid = ((gen0size == 0) || !g_theGCHeap->IsValidGen0MaxSize(gen0size)); if (is_config_invalid) { #ifdef SERVER_GC // performance data seems to indicate halving the size results // in optimal perf. Ask for adjusted gen0 size. gen0size = max(GCToOSInterface::GetCacheSizePerLogicalCpu(FALSE),(256*1024)); // if gen0 size is too large given the available memory, reduce it. // Get true cache size, as we don't want to reduce below this. size_t trueSize = max(GCToOSInterface::GetCacheSizePerLogicalCpu(TRUE),(256*1024)); dprintf (1, ("cache: %Id-%Id", GCToOSInterface::GetCacheSizePerLogicalCpu(FALSE), GCToOSInterface::GetCacheSizePerLogicalCpu(TRUE))); int n_heaps = gc_heap::n_heaps; #else //SERVER_GC size_t trueSize = GCToOSInterface::GetCacheSizePerLogicalCpu(TRUE); gen0size = max((4*trueSize/5),(256*1024)); trueSize = max(trueSize, (256*1024)); int n_heaps = 1; #endif //SERVER_GC dprintf (1, ("gen0size: %Id * %d = %Id, physical mem: %Id / 6 = %Id", gen0size, n_heaps, (gen0size * n_heaps), gc_heap::total_physical_mem, gc_heap::total_physical_mem / 6)); // if the total min GC across heaps will exceed 1/6th of available memory, // then reduce the min GC size until it either fits or has been reduced to cache size. while ((gen0size * n_heaps) > (gc_heap::total_physical_mem / 6)) { gen0size = gen0size / 2; if (gen0size <= trueSize) { gen0size = trueSize; break; } } } #ifdef FEATURE_EVENT_TRACE else { gen0_min_budget_from_config = gen0size; } #endif //FEATURE_EVENT_TRACE size_t seg_size = gc_heap::soh_segment_size; assert (seg_size); // Generation 0 must never be more than 1/2 the segment size. if (gen0size >= (seg_size / 2)) gen0size = seg_size / 2; // If the value from config is valid we use it as is without this adjustment. 
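// For illustration only: when the size was derived from the cache size (invalid config) under a hard limit, a
// seg_size of 32 MB would give a cap of 32 MB / 8 = 4 MB, scaled below to 4 MB / 8 * 5 = 2.5 MB.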
if (is_config_invalid) { if (heap_hard_limit) { size_t gen0size_seg = seg_size / 8; if (gen0size >= gen0size_seg) { dprintf (1, ("gen0 limited by seg size %Id->%Id", gen0size, gen0size_seg)); gen0size = gen0size_seg; } } gen0size = gen0size / 8 * 5; } #ifdef USE_REGIONS #ifdef STRESS_REGIONS // This is just so we can test allocation using more than one region on machines with very // small caches. gen0size = ((size_t)1 << min_segment_size_shr) * 3; #endif //STRESS_REGIONS #endif //USE_REGIONS gen0size = Align (gen0size); return gen0size; } void GCHeap::SetReservedVMLimit (size_t vmlimit) { gc_heap::reserved_memory_limit = vmlimit; } //versions of same method on each heap #ifdef FEATURE_PREMORTEM_FINALIZATION Object* GCHeap::GetNextFinalizableObject() { #ifdef MULTIPLE_HEAPS //return the first non critical one in the first queue. for (int hn = 0; hn < gc_heap::n_heaps; hn++) { gc_heap* hp = gc_heap::g_heaps [hn]; Object* O = hp->finalize_queue->GetNextFinalizableObject(TRUE); if (O) return O; } //return the first non critical/critical one in the first queue. for (int hn = 0; hn < gc_heap::n_heaps; hn++) { gc_heap* hp = gc_heap::g_heaps [hn]; Object* O = hp->finalize_queue->GetNextFinalizableObject(FALSE); if (O) return O; } return 0; #else //MULTIPLE_HEAPS return pGenGCHeap->finalize_queue->GetNextFinalizableObject(); #endif //MULTIPLE_HEAPS } size_t GCHeap::GetNumberFinalizableObjects() { #ifdef MULTIPLE_HEAPS size_t cnt = 0; for (int hn = 0; hn < gc_heap::n_heaps; hn++) { gc_heap* hp = gc_heap::g_heaps [hn]; cnt += hp->finalize_queue->GetNumberFinalizableObjects(); } return cnt; #else //MULTIPLE_HEAPS return pGenGCHeap->finalize_queue->GetNumberFinalizableObjects(); #endif //MULTIPLE_HEAPS } size_t GCHeap::GetFinalizablePromotedCount() { #ifdef MULTIPLE_HEAPS size_t cnt = 0; for (int hn = 0; hn < gc_heap::n_heaps; hn++) { gc_heap* hp = gc_heap::g_heaps [hn]; cnt += hp->finalize_queue->GetPromotedCount(); } return cnt; #else //MULTIPLE_HEAPS return pGenGCHeap->finalize_queue->GetPromotedCount(); #endif //MULTIPLE_HEAPS } //--------------------------------------------------------------------------- // Finalized class tracking //--------------------------------------------------------------------------- bool GCHeap::RegisterForFinalization (int gen, Object* obj) { if (gen == -1) gen = 0; if (((((CObjectHeader*)obj)->GetHeader()->GetBits()) & BIT_SBLK_FINALIZER_RUN)) { ((CObjectHeader*)obj)->GetHeader()->ClrBit(BIT_SBLK_FINALIZER_RUN); return true; } else { gc_heap* hp = gc_heap::heap_of ((uint8_t*)obj); return hp->finalize_queue->RegisterForFinalization (gen, obj); } } void GCHeap::SetFinalizationRun (Object* obj) { ((CObjectHeader*)obj)->GetHeader()->SetBit(BIT_SBLK_FINALIZER_RUN); } //-------------------------------------------------------------------- // // Support for finalization // //-------------------------------------------------------------------- inline unsigned int gen_segment (int gen) { assert (((signed)total_generation_count - gen - 1)>=0); return (total_generation_count - gen - 1); } bool CFinalize::Initialize() { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; m_Array = new (nothrow)(Object*[100]); if (!m_Array) { ASSERT (m_Array); STRESS_LOG_OOM_STACK(sizeof(Object*[100])); if (GCConfig::GetBreakOnOOM()) { GCToOSInterface::DebugBreak(); } return false; } m_EndArray = &m_Array[100]; for (int i =0; i < FreeList; i++) { SegQueueLimit (i) = m_Array; } m_PromotedCount = 0; lock = -1; #ifdef _DEBUG lockowner_threadid.Clear(); #endif // _DEBUG return true; } 
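// The queue is a single array partitioned into contiguous segments: one per generation (gen_segment above maps
// older generations to lower indices), then the critical-finalizer and finalizer lists, with the free-list segment
// last; SegQueueLimit / m_FillPointers record where each segment ends.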
CFinalize::~CFinalize() { delete m_Array; } size_t CFinalize::GetPromotedCount () { return m_PromotedCount; } inline void CFinalize::EnterFinalizeLock() { _ASSERTE(dbgOnly_IsSpecialEEThread() || GCToEEInterface::GetThread() == 0 || GCToEEInterface::IsPreemptiveGCDisabled()); retry: if (Interlocked::CompareExchange(&lock, 0, -1) >= 0) { unsigned int i = 0; while (lock >= 0) { YieldProcessor(); // indicate to the processor that we are spinning if (++i & 7) GCToOSInterface::YieldThread (0); else GCToOSInterface::Sleep (5); } goto retry; } #ifdef _DEBUG lockowner_threadid.SetToCurrentThread(); #endif // _DEBUG } inline void CFinalize::LeaveFinalizeLock() { _ASSERTE(dbgOnly_IsSpecialEEThread() || GCToEEInterface::GetThread() == 0 || GCToEEInterface::IsPreemptiveGCDisabled()); #ifdef _DEBUG lockowner_threadid.Clear(); #endif // _DEBUG lock = -1; } bool CFinalize::RegisterForFinalization (int gen, Object* obj, size_t size) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; EnterFinalizeLock(); // Adjust gen unsigned int dest = gen_segment (gen); // Adjust boundary for segments so that GC will keep objects alive. Object*** s_i = &SegQueue (FreeList); if ((*s_i) == m_EndArray) { if (!GrowArray()) { LeaveFinalizeLock(); if (method_table(obj) == NULL) { // If the object is uninitialized, a valid size should have been passed. assert (size >= Align (min_obj_size)); dprintf (3, (ThreadStressLog::gcMakeUnusedArrayMsg(), (size_t)obj, (size_t)(obj+size))); ((CObjectHeader*)obj)->SetFree(size); } STRESS_LOG_OOM_STACK(0); if (GCConfig::GetBreakOnOOM()) { GCToOSInterface::DebugBreak(); } return false; } } Object*** end_si = &SegQueueLimit (dest); do { //is the segment empty? if (!(*s_i == *(s_i-1))) { //no, swap the end elements. *(*s_i) = *(*(s_i-1)); } //increment the fill pointer (*s_i)++; //go to the next segment. 
s_i--; } while (s_i > end_si); // We have reached the destination segment // store the object **s_i = obj; // increment the fill pointer (*s_i)++; LeaveFinalizeLock(); return true; } Object* CFinalize::GetNextFinalizableObject (BOOL only_non_critical) { Object* obj = 0; EnterFinalizeLock(); if (!IsSegEmpty(FinalizerListSeg)) { obj = *(--SegQueueLimit (FinalizerListSeg)); } else if (!only_non_critical && !IsSegEmpty(CriticalFinalizerListSeg)) { //the FinalizerList is empty, we can adjust both // limit instead of moving the object to the free list obj = *(--SegQueueLimit (CriticalFinalizerListSeg)); --SegQueueLimit (FinalizerListSeg); } if (obj) { dprintf (3, ("running finalizer for %Ix (mt: %Ix)", obj, method_table (obj))); } LeaveFinalizeLock(); return obj; } size_t CFinalize::GetNumberFinalizableObjects() { return SegQueueLimit(FinalizerListSeg) - SegQueue(FinalizerListSeg); } void CFinalize::MoveItem (Object** fromIndex, unsigned int fromSeg, unsigned int toSeg) { int step; ASSERT (fromSeg != toSeg); if (fromSeg > toSeg) step = -1; else step = +1; // Place the element at the boundary closest to dest Object** srcIndex = fromIndex; for (unsigned int i = fromSeg; i != toSeg; i+= step) { Object**& destFill = m_FillPointers[i+(step - 1 )/2]; Object** destIndex = destFill - (step + 1)/2; if (srcIndex != destIndex) { Object* tmp = *srcIndex; *srcIndex = *destIndex; *destIndex = tmp; } destFill -= step; srcIndex = destIndex; } } void CFinalize::GcScanRoots (promote_func* fn, int hn, ScanContext *pSC) { ScanContext sc; if (pSC == 0) pSC = &sc; pSC->thread_number = hn; //scan the finalization queue Object** startIndex = SegQueue (CriticalFinalizerListSeg); Object** stopIndex = SegQueueLimit (FinalizerListSeg); for (Object** po = startIndex; po < stopIndex; po++) { Object* o = *po; //dprintf (3, ("scan freacheable %Ix", (size_t)o)); dprintf (3, ("scan f %Ix", (size_t)o)); (*fn)(po, pSC, 0); } } void CFinalize::WalkFReachableObjects (fq_walk_fn fn) { Object** startIndex = SegQueue (CriticalFinalizerListSeg); Object** stopCriticalIndex = SegQueueLimit (CriticalFinalizerListSeg); Object** stopIndex = SegQueueLimit (FinalizerListSeg); for (Object** po = startIndex; po < stopIndex; po++) { fn(po < stopCriticalIndex, *po); } } BOOL CFinalize::ScanForFinalization (promote_func* pfn, int gen, BOOL mark_only_p, gc_heap* hp) { ScanContext sc; sc.promotion = TRUE; #ifdef MULTIPLE_HEAPS sc.thread_number = hp->heap_number; #else UNREFERENCED_PARAMETER(hp); #endif //MULTIPLE_HEAPS BOOL finalizedFound = FALSE; //start with gen and explore all the younger generations. unsigned int startSeg = gen_segment (gen); { m_PromotedCount = 0; for (unsigned int Seg = startSeg; Seg <= gen_segment(0); Seg++) { Object** endIndex = SegQueue (Seg); for (Object** i = SegQueueLimit (Seg)-1; i >= endIndex ;i--) { CObjectHeader* obj = (CObjectHeader*)*i; dprintf (3, ("scanning: %Ix", (size_t)obj)); if (!g_theGCHeap->IsPromoted (obj)) { dprintf (3, ("freacheable: %Ix", (size_t)obj)); assert (method_table(obj)->HasFinalizer()); if (GCToEEInterface::EagerFinalized(obj)) { MoveItem (i, Seg, FreeList); } else if ((obj->GetHeader()->GetBits()) & BIT_SBLK_FINALIZER_RUN) { //remove the object because we don't want to //run the finalizer MoveItem (i, Seg, FreeList); //Reset the bit so it will be put back on the queue //if resurrected and re-registered. 
obj->GetHeader()->ClrBit (BIT_SBLK_FINALIZER_RUN); } else { m_PromotedCount++; if (method_table(obj)->HasCriticalFinalizer()) { MoveItem (i, Seg, CriticalFinalizerListSeg); } else { MoveItem (i, Seg, FinalizerListSeg); } } } #ifdef BACKGROUND_GC else { if ((gen == max_generation) && (gc_heap::background_running_p())) { // TODO - fix the following line. //assert (gc_heap::background_object_marked ((uint8_t*)obj, FALSE)); dprintf (3, ("%Ix is marked", (size_t)obj)); } } #endif //BACKGROUND_GC } } } finalizedFound = !IsSegEmpty(FinalizerListSeg) || !IsSegEmpty(CriticalFinalizerListSeg); if (finalizedFound) { //Promote the f-reachable objects GcScanRoots (pfn, #ifdef MULTIPLE_HEAPS hp->heap_number #else 0 #endif //MULTIPLE_HEAPS , 0); hp->settings.found_finalizers = TRUE; #ifdef BACKGROUND_GC if (hp->settings.concurrent) { hp->settings.found_finalizers = !(IsSegEmpty(FinalizerListSeg) && IsSegEmpty(CriticalFinalizerListSeg)); } #endif //BACKGROUND_GC if (hp->settings.concurrent && hp->settings.found_finalizers) { if (!mark_only_p) GCToEEInterface::EnableFinalization(true); } } return finalizedFound; } //Relocates all of the objects in the finalization array void CFinalize::RelocateFinalizationData (int gen, gc_heap* hp) { ScanContext sc; sc.promotion = FALSE; #ifdef MULTIPLE_HEAPS sc.thread_number = hp->heap_number; #else UNREFERENCED_PARAMETER(hp); #endif //MULTIPLE_HEAPS unsigned int Seg = gen_segment (gen); Object** startIndex = SegQueue (Seg); dprintf (3, ("RelocateFinalizationData gen=%d, [%Ix,%Ix[", gen, startIndex, SegQueue (FreeList))); for (Object** po = startIndex; po < SegQueue (FreeList);po++) { GCHeap::Relocate (po, &sc); } } void CFinalize::UpdatePromotedGenerations (int gen, BOOL gen_0_empty_p) { dprintf(3, ("UpdatePromotedGenerations gen=%d, gen_0_empty_p=%d", gen, gen_0_empty_p)); // update the generation fill pointers. // if gen_0_empty is FALSE, test each object to find out if // it was promoted or not if (gen_0_empty_p) { for (int i = min (gen+1, max_generation); i > 0; i--) { m_FillPointers [gen_segment(i)] = m_FillPointers [gen_segment(i-1)]; } } else { //Look for demoted or promoted objects for (int i = gen; i >= 0; i--) { unsigned int Seg = gen_segment (i); Object** startIndex = SegQueue (Seg); for (Object** po = startIndex; po < SegQueueLimit (gen_segment(i)); po++) { int new_gen = g_theGCHeap->WhichGeneration (*po); if (new_gen != i) { dprintf (3, ("Moving object %Ix->%Ix from gen %d to gen %d", po, *po, i, new_gen)); if (new_gen > i) { //promotion MoveItem (po, gen_segment (i), gen_segment (new_gen)); } else { //demotion MoveItem (po, gen_segment (i), gen_segment (new_gen)); //back down in order to see all objects. 
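// (MoveItem shifts a different, not-yet-examined object into slot *po, hence the step back below.)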
po--; } } } } } } BOOL CFinalize::GrowArray() { size_t oldArraySize = (m_EndArray - m_Array); size_t newArraySize = (size_t)(((float)oldArraySize / 10) * 12); Object** newArray = new (nothrow) Object*[newArraySize]; if (!newArray) { return FALSE; } memcpy (newArray, m_Array, oldArraySize*sizeof(Object*)); dprintf (3, ("Grow finalizer array [%Ix,%Ix[ -> [%Ix,%Ix[", m_Array, m_EndArray, newArray, &m_Array[newArraySize])); //adjust the fill pointers for (int i = 0; i < FreeList; i++) { m_FillPointers [i] += (newArray - m_Array); } delete[] m_Array; m_Array = newArray; m_EndArray = &m_Array [newArraySize]; return TRUE; } #ifdef VERIFY_HEAP void CFinalize::CheckFinalizerObjects() { for (int i = 0; i <= max_generation; i++) { Object **startIndex = SegQueue (gen_segment (i)); Object **stopIndex = SegQueueLimit (gen_segment (i)); for (Object **po = startIndex; po < stopIndex; po++) { if ((int)g_theGCHeap->WhichGeneration (*po) < i) FATAL_GC_ERROR (); ((CObjectHeader*)*po)->Validate(); } } } #endif //VERIFY_HEAP #endif // FEATURE_PREMORTEM_FINALIZATION //------------------------------------------------------------------------------ // // End of VM specific support // //------------------------------------------------------------------------------ void gc_heap::walk_heap_per_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p) { generation* gen = gc_heap::generation_of (gen_number); heap_segment* seg = generation_start_segment (gen); uint8_t* x = ((gen_number == max_generation) ? heap_segment_mem (seg) : get_soh_start_object (seg, gen)); uint8_t* end = heap_segment_allocated (seg); int align_const = get_alignment_constant (TRUE); BOOL walk_pinned_object_heap = walk_large_object_heap_p; while (1) { if (x >= end) { if ((seg = heap_segment_next (seg)) != 0) { x = heap_segment_mem (seg); end = heap_segment_allocated (seg); continue; } #ifdef USE_REGIONS else if (gen_number > 0) { // advance to next lower generation gen_number--; gen = gc_heap::generation_of (gen_number); seg = generation_start_segment (gen); x = heap_segment_mem (seg); end = heap_segment_allocated (seg); continue; } #endif // USE_REGIONS else { if (walk_large_object_heap_p) { walk_large_object_heap_p = FALSE; seg = generation_start_segment (large_object_generation); } else if (walk_pinned_object_heap) { walk_pinned_object_heap = FALSE; seg = generation_start_segment (pinned_object_generation); } else { break; } align_const = get_alignment_constant (FALSE); x = heap_segment_mem (seg); end = heap_segment_allocated (seg); continue; } } size_t s = size (x); CObjectHeader* o = (CObjectHeader*)x; if (!o->IsFree()) { _ASSERTE(((size_t)o & 0x3) == 0); // Last two bits should never be set at this point if (!fn (o->GetObjectBase(), context)) return; } x = x + Align (s, align_const); } } void gc_heap::walk_finalize_queue (fq_walk_fn fn) { #ifdef FEATURE_PREMORTEM_FINALIZATION finalize_queue->WalkFReachableObjects (fn); #endif //FEATURE_PREMORTEM_FINALIZATION } void gc_heap::walk_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p) { #ifdef MULTIPLE_HEAPS for (int hn = 0; hn < gc_heap::n_heaps; hn++) { gc_heap* hp = gc_heap::g_heaps [hn]; hp->walk_heap_per_heap (fn, context, gen_number, walk_large_object_heap_p); } #else walk_heap_per_heap(fn, context, gen_number, walk_large_object_heap_p); #endif //MULTIPLE_HEAPS } void GCHeap::DiagWalkObject (Object* obj, walk_fn fn, void* context) { uint8_t* o = (uint8_t*)obj; if (o) { go_through_object_cl (method_table (o), o, size(o), oo, { if (*oo) { 
Object *oh = (Object*)*oo; if (!fn (oh, context)) return; } } ); } } void GCHeap::DiagWalkObject2 (Object* obj, walk_fn2 fn, void* context) { uint8_t* o = (uint8_t*)obj; if (o) { go_through_object_cl (method_table (o), o, size(o), oo, { if (*oo) { if (!fn (obj, oo, context)) return; } } ); } } void GCHeap::DiagWalkSurvivorsWithType (void* gc_context, record_surv_fn fn, void* diag_context, walk_surv_type type, int gen_number) { gc_heap* hp = (gc_heap*)gc_context; if (type == walk_for_uoh) { hp->walk_survivors_for_uoh (diag_context, fn, gen_number); } else { hp->walk_survivors (fn, diag_context, type); } } void GCHeap::DiagWalkHeap (walk_fn fn, void* context, int gen_number, bool walk_large_object_heap_p) { gc_heap::walk_heap (fn, context, gen_number, walk_large_object_heap_p); } void GCHeap::DiagWalkFinalizeQueue (void* gc_context, fq_walk_fn fn) { gc_heap* hp = (gc_heap*)gc_context; hp->walk_finalize_queue (fn); } void GCHeap::DiagScanFinalizeQueue (fq_scan_fn fn, ScanContext* sc) { #ifdef MULTIPLE_HEAPS for (int hn = 0; hn < gc_heap::n_heaps; hn++) { gc_heap* hp = gc_heap::g_heaps [hn]; hp->finalize_queue->GcScanRoots(fn, hn, sc); } #else pGenGCHeap->finalize_queue->GcScanRoots(fn, 0, sc); #endif //MULTIPLE_HEAPS } void GCHeap::DiagScanHandles (handle_scan_fn fn, int gen_number, ScanContext* context) { GCScan::GcScanHandlesForProfilerAndETW (gen_number, context, fn); } void GCHeap::DiagScanDependentHandles (handle_scan_fn fn, int gen_number, ScanContext* context) { GCScan::GcScanDependentHandlesForProfilerAndETW (gen_number, context, fn); } void GCHeap::DiagGetGCSettings(EtwGCSettingsInfo* etw_settings) { #ifdef FEATURE_EVENT_TRACE etw_settings->heap_hard_limit = gc_heap::heap_hard_limit; etw_settings->loh_threshold = loh_size_threshold; etw_settings->physical_memory_from_config = gc_heap::physical_memory_from_config; etw_settings->gen0_min_budget_from_config = gc_heap::gen0_min_budget_from_config; etw_settings->gen0_max_budget_from_config = gc_heap::gen0_max_budget_from_config; etw_settings->high_mem_percent_from_config = gc_heap::high_mem_percent_from_config; #ifdef BACKGROUND_GC etw_settings->concurrent_gc_p = gc_heap::gc_can_use_concurrent; #else etw_settings->concurrent_gc_p = false; #endif //BACKGROUND_GC etw_settings->use_large_pages_p = gc_heap::use_large_pages_p; etw_settings->use_frozen_segments_p = gc_heap::use_frozen_segments_p; etw_settings->hard_limit_config_p = gc_heap::hard_limit_config_p; etw_settings->no_affinitize_p = #ifdef MULTIPLE_HEAPS gc_heap::gc_thread_no_affinitize_p; #else true; #endif //MULTIPLE_HEAPS #endif //FEATURE_EVENT_TRACE } #if defined(WRITE_BARRIER_CHECK) && !defined (SERVER_GC) // This code is designed to catch the failure to update the write barrier // The way it works is to copy the whole heap right after every GC. The write // barrier code has been modified so that it updates the shadow as well as the // real GC heap. Before doing the next GC, we walk the heap, looking for pointers // that were updated in the real heap, but not the shadow. A mismatch indicates // an error. The offending code can be found by breaking after the correct GC, // and then placing a data breakpoint on the Heap location that was updated without // going through the write barrier. 
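// This verification is only built when WRITE_BARRIER_CHECK is defined and SERVER_GC is not, and it is enabled at
// run time via the HEAPVERIFY_BARRIERCHECK heap-verify level (see initGCShadow below).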
// Called at process shutdown void deleteGCShadow() { if (g_GCShadow != 0) GCToOSInterface::VirtualRelease (g_GCShadow, g_GCShadowEnd - g_GCShadow); g_GCShadow = 0; g_GCShadowEnd = 0; } // Called at startup and right after a GC, get a snapshot of the GC Heap void initGCShadow() { if (!(GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_BARRIERCHECK)) return; size_t len = g_gc_highest_address - g_gc_lowest_address; if (len > (size_t)(g_GCShadowEnd - g_GCShadow)) { deleteGCShadow(); g_GCShadowEnd = g_GCShadow = (uint8_t *)GCToOSInterface::VirtualReserve(len, 0, VirtualReserveFlags::None); if (g_GCShadow == NULL || !GCToOSInterface::VirtualCommit(g_GCShadow, len)) { _ASSERTE(!"Not enough memory to run HeapVerify level 2"); // If after the assert we decide to allow the program to continue // running we need to be in a state that will not trigger any // additional AVs while we fail to allocate a shadow segment, i.e. // ensure calls to updateGCShadow() checkGCWriteBarrier() don't AV deleteGCShadow(); return; } g_GCShadowEnd += len; } // save the value of g_gc_lowest_address at this time. If this value changes before // the next call to checkGCWriteBarrier() it means we extended the heap (with a // large object segment most probably), and the whole shadow segment is inconsistent. g_shadow_lowest_address = g_gc_lowest_address; //****** Copy the whole GC heap ****** // // NOTE: This is the one situation where the combination of heap_segment_rw(gen_start_segment()) // can produce a NULL result. This is because the initialization has not completed. // for (int i = get_start_generation_index(); i < total_generation_count; i++) { generation* gen = gc_heap::generation_of (i); heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); ptrdiff_t delta = g_GCShadow - g_gc_lowest_address; while (seg) { // Copy the segment uint8_t* start = heap_segment_mem(seg); uint8_t* end = heap_segment_allocated (seg); memcpy(start + delta, start, end - start); seg = heap_segment_next_rw (seg); } } } #define INVALIDGCVALUE (void*)((size_t)0xcccccccd) // test to see if 'ptr' was only updated via the write barrier. inline void testGCShadow(Object** ptr) { Object** shadow = (Object**) &g_GCShadow[((uint8_t*) ptr - g_gc_lowest_address)]; if (*ptr != 0 && (uint8_t*) shadow < g_GCShadowEnd && *ptr != *shadow) { // If you get this assertion, someone updated a GC pointer in the heap without // using the write barrier. To find out who, check the value of // dd_collection_count (dynamic_data_of (0)). Also // note the value of 'ptr'. Rerun the App that the previous GC just occurred. // Then put a data breakpoint for the value of 'ptr' Then check every write // to pointer between the two GCs. The last one is not using the write barrier. // If the memory of interest does not exist at system startup, // you need to set the data breakpoint right after the memory gets committed // Set a breakpoint at the end of grow_heap_segment, and put the value of 'ptr' // in the memory window. run until the memory gets mapped. Then you can set // your breakpoint // Note a recent change, we've identified race conditions when updating the gc shadow. // Throughout the runtime, code will update an address in the gc heap, then erect the // write barrier, which calls updateGCShadow. With an app that pounds one heap location // from multiple threads, you can hit this assert even though all involved are using the // write barrier properly. Thusly, we detect the race and set this location to INVALIDGCVALUE. 
// TODO: the code in jithelp.asm doesn't call updateGCShadow, and hasn't been // TODO: fixed to detect the race. We've only seen this race from VolatileWritePtr, // TODO: so elect not to fix jithelp.asm at this time. It should be done if we start hitting // TODO: erroneous asserts in here. if(*shadow!=INVALIDGCVALUE) { #ifdef FEATURE_BASICFREEZE // Write barriers for stores of references to frozen objects may be optimized away. if (!gc_heap::frozen_object_p(*ptr)) #endif // FEATURE_BASICFREEZE { _ASSERTE(!"Pointer updated without using write barrier"); } } /* else { printf("saw a INVALIDGCVALUE. (just to let you know)\n"); } */ } } void testGCShadowHelper (uint8_t* x) { size_t s = size (x); if (contain_pointers (x)) { go_through_object_nostart (method_table(x), x, s, oo, { testGCShadow((Object**) oo); }); } } // Walk the whole heap, looking for pointers that were not updated with the write barrier. void checkGCWriteBarrier() { // g_shadow_lowest_address != g_gc_lowest_address means the GC heap was extended by a segment // and the GC shadow segment did not track that change! if (g_GCShadowEnd <= g_GCShadow || g_shadow_lowest_address != g_gc_lowest_address) { // No shadow stack, nothing to check. return; } { generation* gen = gc_heap::generation_of (max_generation); heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); PREFIX_ASSUME(seg != NULL); while(seg) { uint8_t* x = heap_segment_mem(seg); while (x < heap_segment_allocated (seg)) { size_t s = size (x); testGCShadowHelper (x); x = x + Align (s); } seg = heap_segment_next_rw (seg); } } { // go through non-soh object heaps int alignment = get_alignment_constant(FALSE); for (int i = uoh_start_generation; i < total_generation_count; i++) { generation* gen = gc_heap::generation_of (i); heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); PREFIX_ASSUME(seg != NULL); while(seg) { uint8_t* x = heap_segment_mem(seg); while (x < heap_segment_allocated (seg)) { size_t s = size (x); testGCShadowHelper (x); x = x + Align (s, alignment); } seg = heap_segment_next_rw (seg); } } } } #endif //WRITE_BARRIER_CHECK && !SERVER_GC #ifdef FEATURE_BASICFREEZE void gc_heap::walk_read_only_segment(heap_segment *seg, void *pvContext, object_callback_func pfnMethodTable, object_callback_func pfnObjRef) { uint8_t *o = heap_segment_mem(seg); int alignment = get_alignment_constant(TRUE); while (o < heap_segment_allocated(seg)) { pfnMethodTable(pvContext, o); if (contain_pointers (o)) { go_through_object_nostart (method_table (o), o, size(o), oo, { if (*oo) pfnObjRef(pvContext, oo); } ); } o += Align(size(o), alignment); } } #endif // FEATURE_BASICFREEZE HRESULT GCHeap::WaitUntilConcurrentGCCompleteAsync(int millisecondsTimeout) { #ifdef BACKGROUND_GC if (gc_heap::background_running_p()) { uint32_t dwRet = pGenGCHeap->background_gc_wait(awr_ignored, millisecondsTimeout); if (dwRet == WAIT_OBJECT_0) return S_OK; else if (dwRet == WAIT_TIMEOUT) return HRESULT_FROM_WIN32(ERROR_TIMEOUT); else return E_FAIL; // It is not clear if what the last error would be if the wait failed, // as there are too many layers in between. 
                                // The best we can do is to return E_FAIL;
    }
#endif

    return S_OK;
}

void GCHeap::TemporaryEnableConcurrentGC()
{
#ifdef BACKGROUND_GC
    gc_heap::temp_disable_concurrent_p = false;
#endif //BACKGROUND_GC
}

void GCHeap::TemporaryDisableConcurrentGC()
{
#ifdef BACKGROUND_GC
    gc_heap::temp_disable_concurrent_p = true;
#endif //BACKGROUND_GC
}

bool GCHeap::IsConcurrentGCEnabled()
{
#ifdef BACKGROUND_GC
    return (gc_heap::gc_can_use_concurrent && !(gc_heap::temp_disable_concurrent_p));
#else
    return FALSE;
#endif //BACKGROUND_GC
}

void PopulateDacVars(GcDacVars *gcDacVars)
{
#define DEFINE_FIELD(field_name, field_type) offsetof(CLASS_NAME, field_name),
#define DEFINE_DPTR_FIELD(field_name, field_type) offsetof(CLASS_NAME, field_name),
#define DEFINE_ARRAY_FIELD(field_name, field_type, array_length) offsetof(CLASS_NAME, field_name),
#define DEFINE_MISSING_FIELD(field_name) -1,
#ifdef MULTIPLE_HEAPS
    static int gc_heap_field_offsets[] = {
#define CLASS_NAME gc_heap
#include "dac_gcheap_fields.h"
#undef CLASS_NAME
        offsetof(gc_heap, generation_table)
    };
    static_assert(sizeof(gc_heap_field_offsets) == (GENERATION_TABLE_FIELD_INDEX + 1) * sizeof(int), "GENERATION_TABLE_INDEX mismatch");
#endif //MULTIPLE_HEAPS
    static int generation_field_offsets[] = {
#define CLASS_NAME generation
#include "dac_generation_fields.h"
#undef CLASS_NAME
#undef DEFINE_MISSING_FIELD
#undef DEFINE_ARRAY_FIELD
#undef DEFINE_DPTR_FIELD
#undef DEFINE_FIELD
    };

    assert(gcDacVars != nullptr);
    *gcDacVars = {};
    // Note: these version numbers are not actually checked by SOS, so if you change
    // the GC in a way that makes it incompatible with SOS, please change
    // SOS_BREAKING_CHANGE_VERSION in both the runtime and the diagnostics repo
    gcDacVars->major_version_number = 1;
    gcDacVars->minor_version_number = 0;
#ifdef USE_REGIONS
    gcDacVars->minor_version_number |= 1;
#endif //USE_REGIONS
    gcDacVars->built_with_svr = &g_built_with_svr_gc;
    gcDacVars->build_variant = &g_build_variant;
    gcDacVars->gc_structures_invalid_cnt = const_cast<int32_t*>(&GCScan::m_GcStructuresInvalidCnt);
    gcDacVars->generation_size = sizeof(generation);
    gcDacVars->total_generation_count = total_generation_count;
    gcDacVars->max_gen = &g_max_generation;
#ifdef BACKGROUND_GC
    gcDacVars->current_c_gc_state = const_cast<c_gc_state*>(&gc_heap::current_c_gc_state);
#else //BACKGROUND_GC
    gcDacVars->current_c_gc_state = 0;
#endif //BACKGROUND_GC
#ifndef MULTIPLE_HEAPS
    gcDacVars->ephemeral_heap_segment = reinterpret_cast<dac_heap_segment**>(&gc_heap::ephemeral_heap_segment);
#ifdef BACKGROUND_GC
    gcDacVars->mark_array = &gc_heap::mark_array;
    gcDacVars->background_saved_lowest_address = &gc_heap::background_saved_lowest_address;
    gcDacVars->background_saved_highest_address = &gc_heap::background_saved_highest_address;
    gcDacVars->next_sweep_obj = &gc_heap::next_sweep_obj;
#ifdef USE_REGIONS
    gcDacVars->saved_sweep_ephemeral_seg = 0;
    gcDacVars->saved_sweep_ephemeral_start = 0;
#else
    gcDacVars->saved_sweep_ephemeral_seg = reinterpret_cast<dac_heap_segment**>(&gc_heap::saved_sweep_ephemeral_seg);
    gcDacVars->saved_sweep_ephemeral_start = &gc_heap::saved_sweep_ephemeral_start;
#endif //USE_REGIONS
#else //BACKGROUND_GC
    gcDacVars->mark_array = 0;
    gcDacVars->background_saved_lowest_address = 0;
    gcDacVars->background_saved_highest_address = 0;
    gcDacVars->next_sweep_obj = 0;
    gcDacVars->saved_sweep_ephemeral_seg = 0;
    gcDacVars->saved_sweep_ephemeral_start = 0;
#endif //BACKGROUND_GC
    gcDacVars->alloc_allocated = &gc_heap::alloc_allocated;
    gcDacVars->oom_info = &gc_heap::oom_info;
    gcDacVars->finalize_queue =
        reinterpret_cast<dac_finalize_queue**>(&gc_heap::finalize_queue);
    gcDacVars->generation_table = reinterpret_cast<unused_generation**>(&gc_heap::generation_table);
#ifdef GC_CONFIG_DRIVEN
    gcDacVars->gc_global_mechanisms = reinterpret_cast<size_t**>(&gc_global_mechanisms);
    gcDacVars->interesting_data_per_heap = reinterpret_cast<size_t**>(&gc_heap::interesting_data_per_heap);
    gcDacVars->compact_reasons_per_heap = reinterpret_cast<size_t**>(&gc_heap::compact_reasons_per_heap);
    gcDacVars->expand_mechanisms_per_heap = reinterpret_cast<size_t**>(&gc_heap::expand_mechanisms_per_heap);
    gcDacVars->interesting_mechanism_bits_per_heap = reinterpret_cast<size_t**>(&gc_heap::interesting_mechanism_bits_per_heap);
#endif // GC_CONFIG_DRIVEN
#ifdef HEAP_ANALYZE
    gcDacVars->internal_root_array = &gc_heap::internal_root_array;
    gcDacVars->internal_root_array_index = &gc_heap::internal_root_array_index;
    gcDacVars->heap_analyze_success = &gc_heap::heap_analyze_success;
#endif // HEAP_ANALYZE
#else
    gcDacVars->n_heaps = &gc_heap::n_heaps;
    gcDacVars->g_heaps = reinterpret_cast<unused_gc_heap***>(&gc_heap::g_heaps);
    gcDacVars->gc_heap_field_offsets = reinterpret_cast<int**>(&gc_heap_field_offsets);
#endif // MULTIPLE_HEAPS
    gcDacVars->generation_field_offsets = reinterpret_cast<int**>(&generation_field_offsets);
}
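// Note on consumption (illustrative; the reader side is not part of this file): a DAC/SOS
// reader locates fields of out-of-process gc_heap / generation instances by adding the
// offsets recorded above to the instance's target address, roughly:
//
//   field_addr = heap_instance_addr + gc_heap_field_offsets[field_index];   // hypothetical sketch
//
// Entries emitted by DEFINE_MISSING_FIELD are -1 and mark fields that are absent from this build.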
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // #Overview // // GC automatically manages memory allocated by managed code. // The design doc for GC can be found at docs/design/coreclr/botr/garbage-collection.md // // This file includes both the code for GC and the allocator. The most common // case for a GC to be triggered is from the allocator code. See // code:#try_allocate_more_space where it calls GarbageCollectGeneration. // // Entry points for the allocator are GCHeap::Alloc* which are called by the // allocation helpers in gcscan.cpp // #include "gcpriv.h" #if defined(TARGET_AMD64) && defined(TARGET_WINDOWS) #define USE_VXSORT #else #define USE_INTROSORT #endif #ifdef DACCESS_COMPILE #error this source file should not be compiled with DACCESS_COMPILE! #endif //DACCESS_COMPILE // We just needed a simple random number generator for testing. class gc_rand { public: static uint64_t x; static uint64_t get_rand() { x = (314159269*x+278281) & 0x7FFFFFFF; return x; } // obtain random number in the range 0 .. r-1 static uint64_t get_rand(uint64_t r) { // require r >= 0 uint64_t x = (uint64_t)((get_rand() * r) >> 31); return x; } }; uint64_t gc_rand::x = 0; #if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE) BOOL bgc_heap_walk_for_etw_p = FALSE; #endif //BACKGROUND_GC && FEATURE_EVENT_TRACE #define MAX_PTR ((uint8_t*)(~(ptrdiff_t)0)) #define commit_min_th (16*OS_PAGE_SIZE) #define MIN_SOH_CROSS_GEN_REFS (400) #define MIN_LOH_CROSS_GEN_REFS (800) #ifdef SERVER_GC #define partial_size_th 100 #define num_partial_refs 64 #else //SERVER_GC #define partial_size_th 100 #define num_partial_refs 32 #endif //SERVER_GC #ifdef USE_REGIONS // If the pinned survived is 1+% of the region size, we don't demote. #define demotion_pinned_ratio_th (1) // If the survived / region_size is 90+%, we don't compact this region. #define sip_surv_ratio_th (90) // If the survived due to cards from old generations / region_size is 90+%, // we don't compact this region, also we immediately promote it to gen2. #define sip_old_card_surv_ratio_th (90) #else #define demotion_plug_len_th (6*1024*1024) #endif //USE_REGIONS #ifdef HOST_64BIT #define MARK_STACK_INITIAL_LENGTH 1024 #else #define MARK_STACK_INITIAL_LENGTH 128 #endif // HOST_64BIT #define LOH_PIN_QUEUE_LENGTH 100 #define LOH_PIN_DECAY 10 uint32_t yp_spin_count_unit = 0; size_t loh_size_threshold = LARGE_OBJECT_SIZE; #ifdef GC_CONFIG_DRIVEN int compact_ratio = 0; #endif //GC_CONFIG_DRIVEN // See comments in reset_memory. 
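// reset_mm_p gates whether reset_memory keeps asking the OS to reset (discard the contents of)
// freed pages; it is cleared if the OS reports the request cannot be honored, so later GCs
// skip the call (see reset_memory).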
BOOL reset_mm_p = TRUE; #ifdef FEATURE_SVR_GC bool g_built_with_svr_gc = true; #else bool g_built_with_svr_gc = false; #endif // FEATURE_SVR_GC #if defined(BUILDENV_DEBUG) uint8_t g_build_variant = 0; #elif defined(BUILDENV_CHECKED) uint8_t g_build_variant = 1; #else uint8_t g_build_variant = 2; #endif //BUILDENV_DEBUG VOLATILE(int32_t) g_no_gc_lock = -1; #ifdef TRACE_GC const char * const allocation_state_str[] = { "start", "can_allocate", "cant_allocate", "retry_allocate", "try_fit", "try_fit_new_seg", "try_fit_after_cg", "try_fit_after_bgc", "try_free_full_seg_in_bgc", "try_free_after_bgc", "try_seg_end", "acquire_seg", "acquire_seg_after_cg", "acquire_seg_after_bgc", "check_and_wait_for_bgc", "trigger_full_compact_gc", "trigger_ephemeral_gc", "trigger_2nd_ephemeral_gc", "check_retry_seg" }; const char * const msl_take_state_str[] = { "get_large_seg", "bgc_loh_sweep", "wait_bgc", "block_gc", "clr_mem", "clr_large_mem", "t_eph_gc", "t_full_gc", "alloc_small", "alloc_large", "alloc_small_cant", "alloc_large_cant", "try_alloc", "try_budget" }; #endif //TRACE_GC // Keep this in sync with the definition of gc_reason #if (defined(DT_LOG) || defined(TRACE_GC)) static const char* const str_gc_reasons[] = { "alloc_soh", "induced", "lowmem", "empty", "alloc_loh", "oos_soh", "oos_loh", "induced_noforce", "gcstress", "induced_lowmem", "induced_compacting", "lowmemory_host", "pm_full_gc", "lowmemory_host_blocking" }; static const char* const str_gc_pause_modes[] = { "batch", "interactive", "low_latency", "sustained_low_latency", "no_gc" }; static const char* const str_root_kinds[] = { "Stack", "FinalizeQueue", "Handles", "OlderGen", "SizedRef", "Overflow", "DependentHandles", "NewFQ", "Steal", "BGC" }; #endif //DT_LOG || TRACE_GC inline BOOL is_induced (gc_reason reason) { return ((reason == reason_induced) || (reason == reason_induced_noforce) || (reason == reason_lowmemory) || (reason == reason_lowmemory_blocking) || (reason == reason_induced_compacting) || (reason == reason_lowmemory_host) || (reason == reason_lowmemory_host_blocking)); } inline BOOL is_induced_blocking (gc_reason reason) { return ((reason == reason_induced) || (reason == reason_lowmemory_blocking) || (reason == reason_induced_compacting) || (reason == reason_lowmemory_host_blocking)); } gc_oh_num gen_to_oh(int gen) { switch (gen) { case soh_gen0: return gc_oh_num::soh; case soh_gen1: return gc_oh_num::soh; case soh_gen2: return gc_oh_num::soh; case loh_generation: return gc_oh_num::loh; case poh_generation: return gc_oh_num::poh; default: return gc_oh_num::none; } } uint64_t qpf; double qpf_ms; double qpf_us; uint64_t GetHighPrecisionTimeStamp() { int64_t ts = GCToOSInterface::QueryPerformanceCounter(); return (uint64_t)((double)ts * qpf_us); } uint64_t RawGetHighPrecisionTimeStamp() { return (uint64_t)GCToOSInterface::QueryPerformanceCounter(); } #ifdef BGC_SERVO_TUNING bool gc_heap::bgc_tuning::enable_fl_tuning = false; uint32_t gc_heap::bgc_tuning::memory_load_goal = 0; uint32_t gc_heap::bgc_tuning::memory_load_goal_slack = 0; uint64_t gc_heap::bgc_tuning::available_memory_goal = 0; bool gc_heap::bgc_tuning::panic_activated_p = false; double gc_heap::bgc_tuning::accu_error_panic = 0.0; double gc_heap::bgc_tuning::above_goal_kp = 0.0; double gc_heap::bgc_tuning::above_goal_ki = 0.0; bool gc_heap::bgc_tuning::enable_kd = false; bool gc_heap::bgc_tuning::enable_ki = false; bool gc_heap::bgc_tuning::enable_smooth = false; bool gc_heap::bgc_tuning::enable_tbh = false; bool gc_heap::bgc_tuning::enable_ff = false; bool 
gc_heap::bgc_tuning::enable_gradual_d = false; double gc_heap::bgc_tuning::above_goal_kd = 0.0; double gc_heap::bgc_tuning::above_goal_ff = 0.0; double gc_heap::bgc_tuning::num_gen1s_smooth_factor = 0.0; double gc_heap::bgc_tuning::ml_kp = 0.0; double gc_heap::bgc_tuning::ml_ki = 0.0; double gc_heap::bgc_tuning::accu_error = 0.0; bool gc_heap::bgc_tuning::fl_tuning_triggered = false; size_t gc_heap::bgc_tuning::num_bgcs_since_tuning_trigger = 0; bool gc_heap::bgc_tuning::next_bgc_p = false; size_t gc_heap::bgc_tuning::gen1_index_last_bgc_end; size_t gc_heap::bgc_tuning::gen1_index_last_bgc_start; size_t gc_heap::bgc_tuning::gen1_index_last_bgc_sweep; size_t gc_heap::bgc_tuning::actual_num_gen1s_to_trigger; gc_heap::bgc_tuning::tuning_calculation gc_heap::bgc_tuning::gen_calc[2]; gc_heap::bgc_tuning::tuning_stats gc_heap::bgc_tuning::gen_stats[2]; gc_heap::bgc_tuning::bgc_size_data gc_heap::bgc_tuning::current_bgc_end_data[2]; size_t gc_heap::bgc_tuning::last_stepping_bgc_count = 0; uint32_t gc_heap::bgc_tuning::last_stepping_mem_load = 0; uint32_t gc_heap::bgc_tuning::stepping_interval = 0; bool gc_heap::bgc_tuning::use_stepping_trigger_p = true; double gc_heap::bgc_tuning::gen2_ratio_correction = 0.0; double gc_heap::bgc_tuning::ratio_correction_step = 0.0; int gc_heap::saved_bgc_tuning_reason = -1; #endif //BGC_SERVO_TUNING inline size_t round_up_power2 (size_t size) { // Get the 0-based index of the most-significant bit in size-1. // If the call failed (because size-1 is zero), size must be 1, // so return 1 (because 1 rounds up to itself). DWORD highest_set_bit_index; if (0 == #ifdef HOST_64BIT BitScanReverse64( #else BitScanReverse( #endif &highest_set_bit_index, size - 1)) { return 1; } // The size == 0 case (which would have overflowed to SIZE_MAX when decremented) // is handled below by relying on the fact that highest_set_bit_index is the maximum value // (31 or 63, depending on sizeof(size_t)) and left-shifting a value >= 2 by that // number of bits shifts in zeros from the right, resulting in an output of zero. return static_cast<size_t>(2) << highest_set_bit_index; } inline size_t round_down_power2 (size_t size) { // Get the 0-based index of the most-significant bit in size. // If the call failed, size must be zero so return zero. DWORD highest_set_bit_index; if (0 == #ifdef HOST_64BIT BitScanReverse64( #else BitScanReverse( #endif &highest_set_bit_index, size)) { return 0; } // Left-shift 1 by highest_set_bit_index to get back a value containing only // the most-significant set bit of size, i.e. size rounded down // to the next power-of-two value. return static_cast<size_t>(1) << highest_set_bit_index; } // Get the 0-based index of the most-significant bit in the value. // Returns -1 if the input value is zero (i.e. has no set bits). inline int index_of_highest_set_bit (size_t value) { // Get the 0-based index of the most-significant bit in the value. // If the call failed (because value is zero), return -1. DWORD highest_set_bit_index; return (0 == #ifdef HOST_64BIT BitScanReverse64( #else BitScanReverse( #endif &highest_set_bit_index, value)) ? -1 : static_cast<int>(highest_set_bit_index); } inline int relative_index_power2_plug (size_t power2) { int index = index_of_highest_set_bit (power2); assert (index <= MAX_INDEX_POWER2); return ((index < MIN_INDEX_POWER2) ? 
0 : (index - MIN_INDEX_POWER2)); } inline int relative_index_power2_free_space (size_t power2) { int index = index_of_highest_set_bit (power2); assert (index <= MAX_INDEX_POWER2); return ((index < MIN_INDEX_POWER2) ? -1 : (index - MIN_INDEX_POWER2)); } #ifdef BACKGROUND_GC uint32_t bgc_alloc_spin_count = 140; uint32_t bgc_alloc_spin_count_loh = 16; uint32_t bgc_alloc_spin = 2; inline void c_write (uint32_t& place, uint32_t value) { Interlocked::Exchange (&place, value); } // If every heap's gen2 or gen3 size is less than this threshold we will do a blocking GC. const size_t bgc_min_per_heap = 4*1024*1024; int gc_heap::gchist_index = 0; gc_mechanisms_store gc_heap::gchist[max_history_count]; #ifndef MULTIPLE_HEAPS VOLATILE(bgc_state) gc_heap::current_bgc_state = bgc_not_in_process; int gc_heap::gchist_index_per_heap = 0; gc_heap::gc_history gc_heap::gchist_per_heap[max_history_count]; #endif //MULTIPLE_HEAPS #endif //BACKGROUND_GC void gc_heap::add_to_history_per_heap() { #if defined(GC_HISTORY) && defined(BACKGROUND_GC) gc_history* current_hist = &gchist_per_heap[gchist_index_per_heap]; current_hist->gc_index = settings.gc_index; current_hist->current_bgc_state = current_bgc_state; size_t elapsed = dd_gc_elapsed_time (dynamic_data_of (0)); current_hist->gc_time_ms = (uint32_t)(elapsed / 1000); current_hist->gc_efficiency = (elapsed ? (total_promoted_bytes / elapsed) : total_promoted_bytes); #ifndef USE_REGIONS current_hist->eph_low = generation_allocation_start (generation_of (max_generation - 1)); current_hist->gen0_start = generation_allocation_start (generation_of (0)); current_hist->eph_high = heap_segment_allocated (ephemeral_heap_segment); #endif //!USE_REGIONS #ifdef BACKGROUND_GC current_hist->bgc_lowest = background_saved_lowest_address; current_hist->bgc_highest = background_saved_highest_address; #endif //BACKGROUND_GC current_hist->fgc_lowest = lowest_address; current_hist->fgc_highest = highest_address; current_hist->g_lowest = g_gc_lowest_address; current_hist->g_highest = g_gc_highest_address; gchist_index_per_heap++; if (gchist_index_per_heap == max_history_count) { gchist_index_per_heap = 0; } #endif //GC_HISTORY && BACKGROUND_GC } void gc_heap::add_to_history() { #if defined(GC_HISTORY) && defined(BACKGROUND_GC) gc_mechanisms_store* current_settings = &gchist[gchist_index]; current_settings->store (&settings); gchist_index++; if (gchist_index == max_history_count) { gchist_index = 0; } #endif //GC_HISTORY && BACKGROUND_GC } #ifdef TRACE_GC BOOL gc_log_on = TRUE; FILE* gc_log = NULL; size_t gc_log_file_size = 0; size_t gc_buffer_index = 0; size_t max_gc_buffers = 0; static CLRCriticalSection gc_log_lock; // we keep this much in a buffer and only flush when the buffer is full #define gc_log_buffer_size (1024*1024) uint8_t* gc_log_buffer = 0; size_t gc_log_buffer_offset = 0; void log_va_msg(const char *fmt, va_list args) { gc_log_lock.Enter(); const int BUFFERSIZE = 4096; static char rgchBuffer[BUFFERSIZE]; char * pBuffer = &rgchBuffer[0]; pBuffer[0] = '\n'; int buffer_start = 1; int pid_len = sprintf_s (&pBuffer[buffer_start], BUFFERSIZE - buffer_start, "[%5d]", (uint32_t)GCToOSInterface::GetCurrentThreadIdForLogging()); buffer_start += pid_len; memset(&pBuffer[buffer_start], '-', BUFFERSIZE - buffer_start); int msg_len = _vsnprintf_s (&pBuffer[buffer_start], BUFFERSIZE - buffer_start, _TRUNCATE, fmt, args); if (msg_len == -1) { msg_len = BUFFERSIZE - buffer_start; } msg_len += buffer_start; if ((gc_log_buffer_offset + msg_len) > (gc_log_buffer_size - 12)) { char 
index_str[8]; memset (index_str, '-', 8); sprintf_s (index_str, ARRAY_SIZE(index_str), "%d", (int)gc_buffer_index); gc_log_buffer[gc_log_buffer_offset] = '\n'; memcpy (gc_log_buffer + (gc_log_buffer_offset + 1), index_str, 8); gc_buffer_index++; if (gc_buffer_index > max_gc_buffers) { fseek (gc_log, 0, SEEK_SET); gc_buffer_index = 0; } fwrite(gc_log_buffer, gc_log_buffer_size, 1, gc_log); fflush(gc_log); memset (gc_log_buffer, '*', gc_log_buffer_size); gc_log_buffer_offset = 0; } memcpy (gc_log_buffer + gc_log_buffer_offset, pBuffer, msg_len); gc_log_buffer_offset += msg_len; gc_log_lock.Leave(); } void GCLog (const char *fmt, ... ) { if (gc_log_on && (gc_log != NULL)) { va_list args; va_start(args, fmt); log_va_msg (fmt, args); va_end(args); } } #endif // TRACE_GC #ifdef GC_CONFIG_DRIVEN BOOL gc_config_log_on = FALSE; FILE* gc_config_log = NULL; // we keep this much in a buffer and only flush when the buffer is full #define gc_config_log_buffer_size (1*1024) // TEMP uint8_t* gc_config_log_buffer = 0; size_t gc_config_log_buffer_offset = 0; // For config since we log so little we keep the whole history. Also it's only // ever logged by one thread so no need to synchronize. void log_va_msg_config(const char *fmt, va_list args) { const int BUFFERSIZE = 256; static char rgchBuffer[BUFFERSIZE]; char * pBuffer = &rgchBuffer[0]; pBuffer[0] = '\n'; int buffer_start = 1; int msg_len = _vsnprintf_s (&pBuffer[buffer_start], BUFFERSIZE - buffer_start, _TRUNCATE, fmt, args ); assert (msg_len != -1); msg_len += buffer_start; if ((gc_config_log_buffer_offset + msg_len) > gc_config_log_buffer_size) { fwrite(gc_config_log_buffer, gc_config_log_buffer_offset, 1, gc_config_log); fflush(gc_config_log); gc_config_log_buffer_offset = 0; } memcpy (gc_config_log_buffer + gc_config_log_buffer_offset, pBuffer, msg_len); gc_config_log_buffer_offset += msg_len; } void GCLogConfig (const char *fmt, ... ) { if (gc_config_log_on && (gc_config_log != NULL)) { va_list args; va_start( args, fmt ); log_va_msg_config (fmt, args); } } #endif // GC_CONFIG_DRIVEN void GCHeap::Shutdown() { #if defined(TRACE_GC) && !defined(BUILD_AS_STANDALONE) if (gc_log_on && (gc_log != NULL)) { fwrite(gc_log_buffer, gc_log_buffer_offset, 1, gc_log); fflush(gc_log); fclose(gc_log); gc_log_buffer_offset = 0; } #endif //TRACE_GC && !BUILD_AS_STANDALONE } #ifdef SYNCHRONIZATION_STATS // Number of GCs have we done since we last logged. static unsigned int gc_count_during_log; // In ms. This is how often we print out stats. static const unsigned int log_interval = 5000; // Time (in ms) when we start a new log interval. static unsigned int log_start_tick; static unsigned int gc_lock_contended; static int64_t log_start_hires; // Cycles accumulated in SuspendEE during log_interval. static uint64_t suspend_ee_during_log; // Cycles accumulated in RestartEE during log_interval. 
static uint64_t restart_ee_during_log; static uint64_t gc_during_log; #endif //SYNCHRONIZATION_STATS void init_sync_log_stats() { #ifdef SYNCHRONIZATION_STATS if (gc_count_during_log == 0) { gc_heap::init_sync_stats(); suspend_ee_during_log = 0; restart_ee_during_log = 0; gc_during_log = 0; gc_lock_contended = 0; log_start_tick = GCToOSInterface::GetLowPrecisionTimeStamp(); log_start_hires = GCToOSInterface::QueryPerformanceCounter(); } gc_count_during_log++; #endif //SYNCHRONIZATION_STATS } void process_sync_log_stats() { #ifdef SYNCHRONIZATION_STATS unsigned int log_elapsed = GCToOSInterface::GetLowPrecisionTimeStamp() - log_start_tick; if (log_elapsed > log_interval) { uint64_t total = GCToOSInterface::QueryPerformanceCounter() - log_start_hires; // Print out the cycles we spent on average in each suspend and restart. printf("\n_________________________________________________________________________________\n" "Past %d(s): #%3d GCs; Total gc_lock contended: %8u; GC: %12u\n" "SuspendEE: %8u; RestartEE: %8u GC %.3f%%\n", log_interval / 1000, gc_count_during_log, gc_lock_contended, (unsigned int)(gc_during_log / gc_count_during_log), (unsigned int)(suspend_ee_during_log / gc_count_during_log), (unsigned int)(restart_ee_during_log / gc_count_during_log), (double)(100.0f * gc_during_log / total)); gc_heap::print_sync_stats(gc_count_during_log); gc_count_during_log = 0; } #endif //SYNCHRONIZATION_STATS } #ifdef MULTIPLE_HEAPS uint32_t g_num_active_processors = 0; // Note that when a join is no longer used we still keep the values here because // tooling already recognized them as having the meaning they were assigned originally. // It doesn't break tooling if we stop using them but does if we assign a new meaning // to them. enum gc_join_stage { gc_join_init_cpu_mapping = 0, gc_join_done = 1, gc_join_generation_determined = 2, gc_join_begin_mark_phase = 3, gc_join_scan_dependent_handles = 4, gc_join_rescan_dependent_handles = 5, gc_join_scan_sizedref_done = 6, gc_join_null_dead_short_weak = 7, gc_join_scan_finalization = 8, gc_join_null_dead_long_weak = 9, gc_join_null_dead_syncblk = 10, gc_join_decide_on_compaction = 11, gc_join_rearrange_segs_compaction = 12, gc_join_adjust_handle_age_compact = 13, gc_join_adjust_handle_age_sweep = 14, gc_join_begin_relocate_phase = 15, gc_join_relocate_phase_done = 16, gc_join_verify_objects_done = 17, gc_join_start_bgc = 18, gc_join_restart_ee = 19, gc_join_concurrent_overflow = 20, gc_join_suspend_ee = 21, gc_join_bgc_after_ephemeral = 22, gc_join_allow_fgc = 23, gc_join_bgc_sweep = 24, gc_join_suspend_ee_verify = 25, gc_join_restart_ee_verify = 26, gc_join_set_state_free = 27, gc_r_join_update_card_bundle = 28, gc_join_after_absorb = 29, gc_join_verify_copy_table = 30, gc_join_after_reset = 31, gc_join_after_ephemeral_sweep = 32, gc_join_after_profiler_heap_walk = 33, gc_join_minimal_gc = 34, gc_join_after_commit_soh_no_gc = 35, gc_join_expand_loh_no_gc = 36, gc_join_final_no_gc = 37, // No longer in use but do not remove, see comments for this enum. 
gc_join_disable_software_write_watch = 38, gc_join_max = 39 }; enum gc_join_flavor { join_flavor_server_gc = 0, join_flavor_bgc = 1 }; #define first_thread_arrived 2 #pragma warning(push) #pragma warning(disable:4324) // don't complain if DECLSPEC_ALIGN actually pads struct DECLSPEC_ALIGN(HS_CACHE_LINE_SIZE) join_structure { // Shared non volatile keep on separate line to prevent eviction int n_threads; // Keep polling/wait structures on separate line write once per join DECLSPEC_ALIGN(HS_CACHE_LINE_SIZE) GCEvent joined_event[3]; // the last event in the array is only used for first_thread_arrived. Volatile<int> lock_color; VOLATILE(BOOL) wait_done; VOLATILE(BOOL) joined_p; // Keep volatile counted locks on separate cache line write many per join DECLSPEC_ALIGN(HS_CACHE_LINE_SIZE) VOLATILE(int) join_lock; VOLATILE(int) r_join_lock; }; #pragma warning(pop) enum join_type { type_last_join = 0, type_join = 1, type_restart = 2, type_first_r_join = 3, type_r_join = 4 }; enum join_time { time_start = 0, time_end = 1 }; enum join_heap_index { join_heap_restart = 100, join_heap_r_restart = 200 }; class t_join { join_structure join_struct; int id; gc_join_flavor flavor; #ifdef JOIN_STATS uint64_t start[MAX_SUPPORTED_CPUS], end[MAX_SUPPORTED_CPUS], start_seq; // remember join id and last thread to arrive so restart can use these int thd; // we want to print statistics every 10 seconds - this is to remember the start of the 10 sec interval uint32_t start_tick; // counters for joins, in 1000's of clock cycles uint64_t elapsed_total[gc_join_max], wake_total[gc_join_max], seq_loss_total[gc_join_max], par_loss_total[gc_join_max], in_join_total[gc_join_max]; #endif //JOIN_STATS public: BOOL init (int n_th, gc_join_flavor f) { dprintf (JOIN_LOG, ("Initializing join structure")); join_struct.n_threads = n_th; join_struct.lock_color = 0; for (int i = 0; i < 3; i++) { if (!join_struct.joined_event[i].IsValid()) { join_struct.joined_p = FALSE; dprintf (JOIN_LOG, ("Creating join event %d", i)); // TODO - changing this to a non OS event // because this is also used by BGC threads which are // managed threads and WaitEx does not allow you to wait // for an OS event on a managed thread. // But we are not sure if this plays well in the hosting // environment. 
//join_struct.joined_event[i].CreateOSManualEventNoThrow(FALSE); if (!join_struct.joined_event[i].CreateManualEventNoThrow(FALSE)) return FALSE; } } join_struct.join_lock = join_struct.n_threads; join_struct.r_join_lock = join_struct.n_threads; join_struct.wait_done = FALSE; flavor = f; #ifdef JOIN_STATS start_tick = GCToOSInterface::GetLowPrecisionTimeStamp(); #endif //JOIN_STATS return TRUE; } void destroy () { dprintf (JOIN_LOG, ("Destroying join structure")); for (int i = 0; i < 3; i++) { if (join_struct.joined_event[i].IsValid()) join_struct.joined_event[i].CloseEvent(); } } inline void fire_event (int heap, join_time time, join_type type, int join_id) { FIRE_EVENT(GCJoin_V2, heap, time, type, join_id); } void join (gc_heap* gch, int join_id) { #ifdef JOIN_STATS // parallel execution ends here end[gch->heap_number] = get_ts(); #endif //JOIN_STATS assert (!join_struct.joined_p); int color = join_struct.lock_color.LoadWithoutBarrier(); if (Interlocked::Decrement(&join_struct.join_lock) != 0) { dprintf (JOIN_LOG, ("join%d(%d): Join() Waiting...join_lock is now %d", flavor, join_id, (int32_t)(join_struct.join_lock))); fire_event (gch->heap_number, time_start, type_join, join_id); //busy wait around the color if (color == join_struct.lock_color.LoadWithoutBarrier()) { respin: int spin_count = 128 * yp_spin_count_unit; for (int j = 0; j < spin_count; j++) { if (color != join_struct.lock_color.LoadWithoutBarrier()) { break; } YieldProcessor(); // indicate to the processor that we are spinning } // we've spun, and if color still hasn't changed, fall into hard wait if (color == join_struct.lock_color.LoadWithoutBarrier()) { dprintf (JOIN_LOG, ("join%d(%d): Join() hard wait on reset event %d, join_lock is now %d", flavor, join_id, color, (int32_t)(join_struct.join_lock))); uint32_t dwJoinWait = join_struct.joined_event[color].Wait(INFINITE, FALSE); if (dwJoinWait != WAIT_OBJECT_0) { STRESS_LOG1 (LF_GC, LL_FATALERROR, "joined event wait failed with code: %Ix", dwJoinWait); FATAL_GC_ERROR (); } } // avoid race due to the thread about to reset the event (occasionally) being preempted before ResetEvent() if (color == join_struct.lock_color.LoadWithoutBarrier()) { goto respin; } dprintf (JOIN_LOG, ("join%d(%d): Join() done, join_lock is %d", flavor, join_id, (int32_t)(join_struct.join_lock))); } fire_event (gch->heap_number, time_end, type_join, join_id); #ifdef JOIN_STATS // parallel execution starts here start[gch->heap_number] = get_ts(); Interlocked::ExchangeAdd(&in_join_total[join_id], (start[gch->heap_number] - end[gch->heap_number])); #endif //JOIN_STATS } else { fire_event (gch->heap_number, time_start, type_last_join, join_id); join_struct.joined_p = TRUE; dprintf (JOIN_LOG, ("join%d(%d): Last thread to complete the join, setting id", flavor, join_id)); join_struct.joined_event[!color].Reset(); id = join_id; #ifdef JOIN_STATS // remember the join id, the last thread arriving, the start of the sequential phase, // and keep track of the cycles spent waiting in the join thd = gch->heap_number; start_seq = get_ts(); Interlocked::ExchangeAdd(&in_join_total[join_id], (start_seq - end[gch->heap_number])); #endif //JOIN_STATS } } // Reverse join - first thread gets here does the work; other threads will only proceed // after the work is done. // Note that you cannot call this twice in a row on the same thread. Plus there's no // need to call it twice in row - you should just merge the work. 
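    // Illustrative call pattern (a sketch, not a quote from a call site):
    //
    //   if (gc_t_join.r_join (hp, join_id))
    //   {
    //       // only the first thread to arrive does the shared work
    //       gc_t_join.r_restart();
    //   }
    //
    // Every other thread returns FALSE from r_join, and only after r_restart() has been called.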
BOOL r_join (gc_heap* gch, int join_id) { if (join_struct.n_threads == 1) { return TRUE; } if (Interlocked::CompareExchange(&join_struct.r_join_lock, 0, join_struct.n_threads) == 0) { fire_event (gch->heap_number, time_start, type_join, join_id); dprintf (JOIN_LOG, ("r_join() Waiting...")); //busy wait around the color respin: int spin_count = 256 * yp_spin_count_unit; for (int j = 0; j < spin_count; j++) { if (join_struct.wait_done) { break; } YieldProcessor(); // indicate to the processor that we are spinning } // we've spun, and if color still hasn't changed, fall into hard wait if (!join_struct.wait_done) { dprintf (JOIN_LOG, ("Join() hard wait on reset event %d", first_thread_arrived)); uint32_t dwJoinWait = join_struct.joined_event[first_thread_arrived].Wait(INFINITE, FALSE); if (dwJoinWait != WAIT_OBJECT_0) { STRESS_LOG1 (LF_GC, LL_FATALERROR, "joined event wait failed with code: %Ix", dwJoinWait); FATAL_GC_ERROR (); } } // avoid race due to the thread about to reset the event (occasionally) being preempted before ResetEvent() if (!join_struct.wait_done) { goto respin; } dprintf (JOIN_LOG, ("r_join() done")); fire_event (gch->heap_number, time_end, type_join, join_id); return FALSE; } else { fire_event (gch->heap_number, time_start, type_first_r_join, join_id); return TRUE; } } #ifdef JOIN_STATS uint64_t get_ts() { return GCToOSInterface::QueryPerformanceCounter(); } void start_ts (gc_heap* gch) { // parallel execution ends here start[gch->heap_number] = get_ts(); } #endif //JOIN_STATS void restart() { #ifdef JOIN_STATS uint64_t elapsed_seq = get_ts() - start_seq; uint64_t max = 0, sum = 0, wake = 0; uint64_t min_ts = start[0]; for (int i = 1; i < join_struct.n_threads; i++) { if(min_ts > start[i]) min_ts = start[i]; } for (int i = 0; i < join_struct.n_threads; i++) { uint64_t wake_delay = start[i] - min_ts; uint64_t elapsed = end[i] - start[i]; if (max < elapsed) max = elapsed; sum += elapsed; wake += wake_delay; } uint64_t seq_loss = (join_struct.n_threads - 1)*elapsed_seq; uint64_t par_loss = join_struct.n_threads*max - sum; double efficiency = 0.0; if (max > 0) efficiency = sum*100.0/(join_struct.n_threads*max); const double ts_scale = 1e-6; // enable this printf to get statistics on each individual join as it occurs //printf("join #%3d seq_loss = %5g par_loss = %5g efficiency = %3.0f%%\n", join_id, ts_scale*seq_loss, ts_scale*par_loss, efficiency); elapsed_total[id] += sum; wake_total[id] += wake; seq_loss_total[id] += seq_loss; par_loss_total[id] += par_loss; // every 10 seconds, print a summary of the time spent in each type of join if (GCToOSInterface::GetLowPrecisionTimeStamp() - start_tick > 10*1000) { printf("**** summary *****\n"); for (int i = 0; i < 16; i++) { printf("join #%3d elapsed_total = %8g wake_loss = %8g seq_loss = %8g par_loss = %8g in_join_total = %8g\n", i, ts_scale*elapsed_total[i], ts_scale*wake_total[i], ts_scale*seq_loss_total[i], ts_scale*par_loss_total[i], ts_scale*in_join_total[i]); elapsed_total[i] = wake_total[i] = seq_loss_total[i] = par_loss_total[i] = in_join_total[i] = 0; } start_tick = GCToOSInterface::GetLowPrecisionTimeStamp(); } #endif //JOIN_STATS fire_event (join_heap_restart, time_start, type_restart, -1); assert (join_struct.joined_p); join_struct.joined_p = FALSE; join_struct.join_lock = join_struct.n_threads; dprintf (JOIN_LOG, ("join%d(%d): Restarting from join: join_lock is %d", flavor, id, (int32_t)(join_struct.join_lock))); int color = join_struct.lock_color.LoadWithoutBarrier(); join_struct.lock_color = !color; 
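        // Flipping lock_color first releases the spinning waiters (they poll the color);
        // setting the old color's event below wakes the threads that fell into the hard wait.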
join_struct.joined_event[color].Set(); fire_event (join_heap_restart, time_end, type_restart, -1); #ifdef JOIN_STATS start[thd] = get_ts(); #endif //JOIN_STATS } BOOL joined() { dprintf (JOIN_LOG, ("join%d(%d): joined, join_lock is %d", flavor, id, (int32_t)(join_struct.join_lock))); return join_struct.joined_p; } void r_restart() { if (join_struct.n_threads != 1) { fire_event (join_heap_r_restart, time_start, type_restart, -1); join_struct.wait_done = TRUE; join_struct.joined_event[first_thread_arrived].Set(); fire_event (join_heap_r_restart, time_end, type_restart, -1); } } void r_init() { if (join_struct.n_threads != 1) { join_struct.r_join_lock = join_struct.n_threads; join_struct.wait_done = FALSE; join_struct.joined_event[first_thread_arrived].Reset(); } } }; t_join gc_t_join; #ifdef BACKGROUND_GC t_join bgc_t_join; #endif //BACKGROUND_GC #endif //MULTIPLE_HEAPS #define spin_and_switch(count_to_spin, expr) \ { \ for (int j = 0; j < count_to_spin; j++) \ { \ if (expr) \ { \ break;\ } \ YieldProcessor(); \ } \ if (!(expr)) \ { \ GCToOSInterface::YieldThread(0); \ } \ } #ifdef BACKGROUND_GC #define max_pending_allocs 64 class exclusive_sync { VOLATILE(uint8_t*) rwp_object; VOLATILE(int32_t) needs_checking; int spin_count; uint8_t cache_separator[HS_CACHE_LINE_SIZE - sizeof (int) - sizeof (int32_t)]; // TODO - perhaps each object should be on its own cache line... VOLATILE(uint8_t*) alloc_objects[max_pending_allocs]; int find_free_index () { for (int i = 0; i < max_pending_allocs; i++) { if (alloc_objects [i] == (uint8_t*)0) { return i; } } return -1; } public: void init() { spin_count = 32 * (g_num_processors - 1); rwp_object = 0; needs_checking = 0; for (int i = 0; i < max_pending_allocs; i++) { alloc_objects [i] = (uint8_t*)0; } } void check() { for (int i = 0; i < max_pending_allocs; i++) { if (alloc_objects [i] != (uint8_t*)0) { FATAL_GC_ERROR(); } } } void bgc_mark_set (uint8_t* obj) { dprintf (3, ("cm: probing %Ix", obj)); retry: if (Interlocked::CompareExchange(&needs_checking, 1, 0) == 0) { // If we spend too much time spending all the allocs, // consider adding a high water mark and scan up // to that; we'll need to interlock in done when // we update the high watermark. 
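        // Scan the pending UOH allocation slots: if the object we are about to mark is
        // currently being allocated, drop the checking flag, spin until the allocator
        // clears that slot, and retry.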
for (int i = 0; i < max_pending_allocs; i++) { if (obj == alloc_objects[i]) { needs_checking = 0; dprintf (3, ("cm: will spin")); spin_and_switch (spin_count, (obj != alloc_objects[i])); goto retry; } } rwp_object = obj; needs_checking = 0; dprintf (3, ("cm: set %Ix", obj)); return; } else { spin_and_switch (spin_count, (needs_checking == 0)); goto retry; } } int uoh_alloc_set (uint8_t* obj) { if (!gc_heap::cm_in_progress) { return -1; } retry: dprintf (3, ("uoh alloc: probing %Ix", obj)); if (Interlocked::CompareExchange(&needs_checking, 1, 0) == 0) { if (obj == rwp_object) { needs_checking = 0; spin_and_switch (spin_count, (obj != rwp_object)); goto retry; } else { int cookie = find_free_index(); if (cookie != -1) { alloc_objects[cookie] = obj; needs_checking = 0; //if (cookie >= 4) //{ // GCToOSInterface::DebugBreak(); //} dprintf (3, ("uoh alloc: set %Ix at %d", obj, cookie)); return cookie; } else { needs_checking = 0; dprintf (3, ("uoh alloc: setting %Ix will spin to acquire a free index", obj)); spin_and_switch (spin_count, (find_free_index () != -1)); goto retry; } } } else { dprintf (3, ("uoh alloc: will spin on checking %Ix", obj)); spin_and_switch (spin_count, (needs_checking == 0)); goto retry; } } void bgc_mark_done () { dprintf (3, ("cm: release lock on %Ix", (uint8_t *)rwp_object)); rwp_object = 0; } void uoh_alloc_done_with_index (int index) { dprintf (3, ("uoh alloc: release lock on %Ix based on %d", (uint8_t *)alloc_objects[index], index)); assert ((index >= 0) && (index < max_pending_allocs)); alloc_objects[index] = (uint8_t*)0; } void uoh_alloc_done (uint8_t* obj) { if (!gc_heap::cm_in_progress) { return; } for (int i = 0; i < max_pending_allocs; i++) { if (alloc_objects [i] == obj) { uoh_alloc_done_with_index(i); return; } } } }; #endif //BACKGROUND_GC void reset_memory (uint8_t* o, size_t sizeo); #ifdef WRITE_WATCH #ifndef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP static bool virtual_alloc_hardware_write_watch = false; #endif // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP static bool hardware_write_watch_capability = false; void hardware_write_watch_api_supported() { if (GCToOSInterface::SupportsWriteWatch()) { hardware_write_watch_capability = true; dprintf (2, ("WriteWatch supported")); } else { dprintf (2,("WriteWatch not supported")); } } inline bool can_use_hardware_write_watch() { return hardware_write_watch_capability; } inline bool can_use_write_watch_for_gc_heap() { #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP return true; #else // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP return can_use_hardware_write_watch(); #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP } inline bool can_use_write_watch_for_card_table() { #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES return true; #else return can_use_hardware_write_watch(); #endif } #else #define mem_reserve (MEM_RESERVE) #endif //WRITE_WATCH //check if the low memory notification is supported void WaitLongerNoInstru (int i) { // every 8th attempt: bool bToggleGC = GCToEEInterface::EnablePreemptiveGC(); // if we're waiting for gc to finish, we should block immediately if (g_fSuspensionPending == 0) { if (g_num_processors > 1) { YieldProcessor(); // indicate to the processor that we are spinning if (i & 0x01f) GCToOSInterface::YieldThread (0); else GCToOSInterface::Sleep (5); } else GCToOSInterface::Sleep (5); } // If CLR is hosted, a thread may reach here while it is in preemptive GC mode, // or it has no Thread object, in order to force a task to yield, or to triger a GC. 
// It is important that the thread is going to wait for GC. Otherwise the thread // is in a tight loop. If the thread has high priority, the perf is going to be very BAD. if (bToggleGC) { #ifdef _DEBUG // In debug builds, all enter_spin_lock operations go through this code. If a GC has // started, it is important to block until the GC thread calls set_gc_done (since it is // guaranteed to have cleared g_TrapReturningThreads by this point). This avoids livelock // conditions which can otherwise occur if threads are allowed to spin in this function // (and therefore starve the GC thread) between the point when the GC thread sets the // WaitForGC event and the point when the GC thread clears g_TrapReturningThreads. if (gc_heap::gc_started) { gc_heap::wait_for_gc_done(); } #endif // _DEBUG GCToEEInterface::DisablePreemptiveGC(); } else if (g_fSuspensionPending > 0) { g_theGCHeap->WaitUntilGCComplete(); } } inline static void safe_switch_to_thread() { bool cooperative_mode = gc_heap::enable_preemptive(); GCToOSInterface::YieldThread(0); gc_heap::disable_preemptive(cooperative_mode); } // // We need the following methods to have volatile arguments, so that they can accept // raw pointers in addition to the results of the & operator on Volatile<T>. // inline static void enter_spin_lock_noinstru (RAW_KEYWORD(volatile) int32_t* lock) { retry: if (Interlocked::CompareExchange(lock, 0, -1) >= 0) { unsigned int i = 0; while (VolatileLoad(lock) >= 0) { if ((++i & 7) && !IsGCInProgress()) { if (g_num_processors > 1) { #ifndef MULTIPLE_HEAPS int spin_count = 32 * yp_spin_count_unit; #else //!MULTIPLE_HEAPS int spin_count = yp_spin_count_unit; #endif //!MULTIPLE_HEAPS for (int j = 0; j < spin_count; j++) { if (VolatileLoad(lock) < 0 || IsGCInProgress()) break; YieldProcessor(); // indicate to the processor that we are spinning } if (VolatileLoad(lock) >= 0 && !IsGCInProgress()) { safe_switch_to_thread(); } } else { safe_switch_to_thread(); } } else { WaitLongerNoInstru(i); } } goto retry; } } inline static BOOL try_enter_spin_lock_noinstru(RAW_KEYWORD(volatile) int32_t* lock) { return (Interlocked::CompareExchange(&*lock, 0, -1) < 0); } inline static void leave_spin_lock_noinstru (RAW_KEYWORD(volatile) int32_t* lock) { VolatileStore<int32_t>((int32_t*)lock, -1); } #ifdef _DEBUG inline static void enter_spin_lock(GCSpinLock *pSpinLock) { enter_spin_lock_noinstru(&pSpinLock->lock); assert (pSpinLock->holding_thread == (Thread*)-1); pSpinLock->holding_thread = GCToEEInterface::GetThread(); } inline static BOOL try_enter_spin_lock(GCSpinLock *pSpinLock) { BOOL ret = try_enter_spin_lock_noinstru(&pSpinLock->lock); if (ret) pSpinLock->holding_thread = GCToEEInterface::GetThread(); return ret; } inline static void leave_spin_lock(GCSpinLock *pSpinLock) { bool gc_thread_p = GCToEEInterface::WasCurrentThreadCreatedByGC(); pSpinLock->released_by_gc_p = gc_thread_p; pSpinLock->holding_thread = (Thread*) -1; if (pSpinLock->lock != -1) leave_spin_lock_noinstru(&pSpinLock->lock); } #define ASSERT_HOLDING_SPIN_LOCK(pSpinLock) \ _ASSERTE((pSpinLock)->holding_thread == GCToEEInterface::GetThread()); #define ASSERT_NOT_HOLDING_SPIN_LOCK(pSpinLock) \ _ASSERTE((pSpinLock)->holding_thread != GCToEEInterface::GetThread()); #else //_DEBUG //In the concurrent version, the Enable/DisablePreemptiveGC is optional because //the gc thread call WaitLonger. 
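// Back-off ladder used by enter_spin_lock below: spin with YieldProcessor, then yield the
// thread, and roughly every 8th attempt (or once a GC has started) fall into WaitLonger,
// which toggles preemptive mode and sleeps or waits for the GC to finish.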
void WaitLonger (int i #ifdef SYNCHRONIZATION_STATS , GCSpinLock* spin_lock #endif //SYNCHRONIZATION_STATS ) { #ifdef SYNCHRONIZATION_STATS (spin_lock->num_wait_longer)++; #endif //SYNCHRONIZATION_STATS // every 8th attempt: bool bToggleGC = GCToEEInterface::EnablePreemptiveGC(); assert (bToggleGC); // if we're waiting for gc to finish, we should block immediately if (!gc_heap::gc_started) { #ifdef SYNCHRONIZATION_STATS (spin_lock->num_switch_thread_w)++; #endif //SYNCHRONIZATION_STATS if (g_num_processors > 1) { YieldProcessor(); // indicate to the processor that we are spinning if (i & 0x01f) GCToOSInterface::YieldThread (0); else GCToOSInterface::Sleep (5); } else GCToOSInterface::Sleep (5); } // If CLR is hosted, a thread may reach here while it is in preemptive GC mode, // or it has no Thread object, in order to force a task to yield, or to triger a GC. // It is important that the thread is going to wait for GC. Otherwise the thread // is in a tight loop. If the thread has high priority, the perf is going to be very BAD. if (gc_heap::gc_started) { gc_heap::wait_for_gc_done(); } if (bToggleGC) { #ifdef SYNCHRONIZATION_STATS (spin_lock->num_disable_preemptive_w)++; #endif //SYNCHRONIZATION_STATS GCToEEInterface::DisablePreemptiveGC(); } } inline static void enter_spin_lock (GCSpinLock* spin_lock) { retry: if (Interlocked::CompareExchange(&spin_lock->lock, 0, -1) >= 0) { unsigned int i = 0; while (spin_lock->lock >= 0) { if ((++i & 7) && !gc_heap::gc_started) { if (g_num_processors > 1) { #ifndef MULTIPLE_HEAPS int spin_count = 32 * yp_spin_count_unit; #else //!MULTIPLE_HEAPS int spin_count = yp_spin_count_unit; #endif //!MULTIPLE_HEAPS for (int j = 0; j < spin_count; j++) { if (spin_lock->lock < 0 || gc_heap::gc_started) break; YieldProcessor(); // indicate to the processor that we are spinning } if (spin_lock->lock >= 0 && !gc_heap::gc_started) { #ifdef SYNCHRONIZATION_STATS (spin_lock->num_switch_thread)++; #endif //SYNCHRONIZATION_STATS bool cooperative_mode = gc_heap::enable_preemptive (); GCToOSInterface::YieldThread(0); gc_heap::disable_preemptive (cooperative_mode); } } else GCToOSInterface::YieldThread(0); } else { WaitLonger(i #ifdef SYNCHRONIZATION_STATS , spin_lock #endif //SYNCHRONIZATION_STATS ); } } goto retry; } } inline static BOOL try_enter_spin_lock(GCSpinLock* spin_lock) { return (Interlocked::CompareExchange(&spin_lock->lock, 0, -1) < 0); } inline static void leave_spin_lock (GCSpinLock * spin_lock) { spin_lock->lock = -1; } #define ASSERT_HOLDING_SPIN_LOCK(pSpinLock) #endif //_DEBUG bool gc_heap::enable_preemptive () { return GCToEEInterface::EnablePreemptiveGC(); } void gc_heap::disable_preemptive (bool restore_cooperative) { if (restore_cooperative) { GCToEEInterface::DisablePreemptiveGC(); } } typedef void ** PTR_PTR; inline void memclr ( uint8_t* mem, size_t size) { dprintf (3, ("MEMCLR: %Ix, %d", mem, size)); assert ((size & (sizeof(PTR_PTR)-1)) == 0); assert (sizeof(PTR_PTR) == DATA_ALIGNMENT); memset (mem, 0, size); } void memcopy (uint8_t* dmem, uint8_t* smem, size_t size) { const size_t sz4ptr = sizeof(PTR_PTR)*4; const size_t sz2ptr = sizeof(PTR_PTR)*2; const size_t sz1ptr = sizeof(PTR_PTR)*1; assert ((size & (sizeof (PTR_PTR)-1)) == 0); assert (sizeof(PTR_PTR) == DATA_ALIGNMENT); // copy in groups of four pointer sized things at a time if (size >= sz4ptr) { do { ((PTR_PTR)dmem)[0] = ((PTR_PTR)smem)[0]; ((PTR_PTR)dmem)[1] = ((PTR_PTR)smem)[1]; ((PTR_PTR)dmem)[2] = ((PTR_PTR)smem)[2]; ((PTR_PTR)dmem)[3] = ((PTR_PTR)smem)[3]; dmem += sz4ptr; smem += sz4ptr; 
} while ((size -= sz4ptr) >= sz4ptr); } // still two pointer sized things or more left to copy? if (size & sz2ptr) { ((PTR_PTR)dmem)[0] = ((PTR_PTR)smem)[0]; ((PTR_PTR)dmem)[1] = ((PTR_PTR)smem)[1]; dmem += sz2ptr; smem += sz2ptr; } // still one pointer sized thing left to copy? if (size & sz1ptr) { ((PTR_PTR)dmem)[0] = ((PTR_PTR)smem)[0]; } } inline ptrdiff_t round_down (ptrdiff_t add, int pitch) { return ((add / pitch) * pitch); } #if defined(FEATURE_STRUCTALIGN) && defined(RESPECT_LARGE_ALIGNMENT) // FEATURE_STRUCTALIGN allows the compiler to dictate the alignment, // i.e, if a larger alignment matters or is beneficial, the compiler // generated info tells us so. RESPECT_LARGE_ALIGNMENT is just the // converse - it's a heuristic for the GC to use a larger alignment. #error FEATURE_STRUCTALIGN should imply !RESPECT_LARGE_ALIGNMENT #endif #if defined(FEATURE_STRUCTALIGN) && defined(FEATURE_LOH_COMPACTION) #error FEATURE_STRUCTALIGN and FEATURE_LOH_COMPACTION are mutually exclusive #endif // Returns true if two pointers have the same large (double than normal) alignment. inline BOOL same_large_alignment_p (uint8_t* p1, uint8_t* p2) { #ifdef RESPECT_LARGE_ALIGNMENT const size_t LARGE_ALIGNMENT_MASK = 2 * DATA_ALIGNMENT - 1; return ((((size_t)p1 ^ (size_t)p2) & LARGE_ALIGNMENT_MASK) == 0); #else UNREFERENCED_PARAMETER(p1); UNREFERENCED_PARAMETER(p2); return TRUE; #endif // RESPECT_LARGE_ALIGNMENT } // Determines the padding size required to fix large alignment during relocation. inline size_t switch_alignment_size (BOOL already_padded_p) { #ifndef RESPECT_LARGE_ALIGNMENT assert (!"Should not be called"); #endif // RESPECT_LARGE_ALIGNMENT if (already_padded_p) return DATA_ALIGNMENT; else return Align (min_obj_size) | DATA_ALIGNMENT; } #ifdef FEATURE_STRUCTALIGN void set_node_aligninfo (uint8_t *node, int requiredAlignment, ptrdiff_t pad); void clear_node_aligninfo (uint8_t *node); #else // FEATURE_STRUCTALIGN #define node_realigned(node) (((plug_and_reloc*)(node))[-1].reloc & 1) void set_node_realigned (uint8_t* node); void clear_node_realigned(uint8_t* node); #endif // FEATURE_STRUCTALIGN inline size_t AlignQword (size_t nbytes) { #ifdef FEATURE_STRUCTALIGN // This function is used to align everything on the large object // heap to an 8-byte boundary, to reduce the number of unaligned // accesses to (say) arrays of doubles. With FEATURE_STRUCTALIGN, // the compiler dictates the optimal alignment instead of having // a heuristic in the GC. return Align (nbytes); #else // FEATURE_STRUCTALIGN return (nbytes + 7) & ~7; #endif // FEATURE_STRUCTALIGN } inline BOOL Aligned (size_t n) { return (n & ALIGNCONST) == 0; } #define OBJECT_ALIGNMENT_OFFSET (sizeof(MethodTable *)) #ifdef FEATURE_STRUCTALIGN #define MAX_STRUCTALIGN OS_PAGE_SIZE #else // FEATURE_STRUCTALIGN #define MAX_STRUCTALIGN 0 #endif // FEATURE_STRUCTALIGN #ifdef FEATURE_STRUCTALIGN inline ptrdiff_t AdjustmentForMinPadSize(ptrdiff_t pad, int requiredAlignment) { // The resulting alignpad must be either 0 or at least min_obj_size. // Note that by computing the following difference on unsigned types, // we can do the range check 0 < alignpad < min_obj_size with a // single conditional branch. 
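    // The unsigned trick referred to above: for a <= b, (size_t)(x - a) < (size_t)(b - a)
    // is equivalent to a <= x < b, because any x < a wraps around to a huge unsigned value.
    // Illustrative numbers: with a = 8 and b = 24 the single compare accepts exactly x = 8..23.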
if ((size_t)(pad - DATA_ALIGNMENT) < Align (min_obj_size) - DATA_ALIGNMENT) { return requiredAlignment; } return 0; } inline uint8_t* StructAlign (uint8_t* origPtr, int requiredAlignment, ptrdiff_t alignmentOffset=OBJECT_ALIGNMENT_OFFSET) { // required alignment must be a power of two _ASSERTE(((size_t)origPtr & ALIGNCONST) == 0); _ASSERTE(((requiredAlignment - 1) & requiredAlignment) == 0); _ASSERTE(requiredAlignment >= sizeof(void *)); _ASSERTE(requiredAlignment <= MAX_STRUCTALIGN); // When this method is invoked for individual objects (i.e., alignmentOffset // is just the size of the PostHeader), what needs to be aligned when // we're done is the pointer to the payload of the object (which means // the actual resulting object pointer is typically not aligned). uint8_t* result = (uint8_t*)Align ((size_t)origPtr + alignmentOffset, requiredAlignment-1) - alignmentOffset; ptrdiff_t alignpad = result - origPtr; return result + AdjustmentForMinPadSize (alignpad, requiredAlignment); } inline ptrdiff_t ComputeStructAlignPad (uint8_t* plug, int requiredAlignment, size_t alignmentOffset=OBJECT_ALIGNMENT_OFFSET) { return StructAlign (plug, requiredAlignment, alignmentOffset) - plug; } BOOL IsStructAligned (uint8_t *ptr, int requiredAlignment) { return StructAlign (ptr, requiredAlignment) == ptr; } inline ptrdiff_t ComputeMaxStructAlignPad (int requiredAlignment) { if (requiredAlignment == DATA_ALIGNMENT) return 0; // Since a non-zero alignment padding cannot be less than min_obj_size (so we can fit the // alignment padding object), the worst-case alignment padding is correspondingly larger // than the required alignment. return requiredAlignment + Align (min_obj_size) - DATA_ALIGNMENT; } inline ptrdiff_t ComputeMaxStructAlignPadLarge (int requiredAlignment) { if (requiredAlignment <= get_alignment_constant (TRUE)+1) return 0; // This is the same as ComputeMaxStructAlignPad, except that in addition to leaving space // for padding before the actual object, it also leaves space for filling a gap after the // actual object. This is needed on the large object heap, as the outer allocation functions // don't operate on an allocation context (which would have left space for the final gap). 
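    // I.e. the worst-case leading pad from ComputeMaxStructAlignPad
    // (requiredAlignment + Align (min_obj_size) - DATA_ALIGNMENT) plus one more
    // Align (min_obj_size) reserved for the trailing gap filler.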
return requiredAlignment + Align (min_obj_size) * 2 - DATA_ALIGNMENT; } uint8_t* gc_heap::pad_for_alignment (uint8_t* newAlloc, int requiredAlignment, size_t size, alloc_context* acontext) { uint8_t* alignedPtr = StructAlign (newAlloc, requiredAlignment); if (alignedPtr != newAlloc) { make_unused_array (newAlloc, alignedPtr - newAlloc); } acontext->alloc_ptr = alignedPtr + Align (size); return alignedPtr; } uint8_t* gc_heap::pad_for_alignment_large (uint8_t* newAlloc, int requiredAlignment, size_t size) { uint8_t* alignedPtr = StructAlign (newAlloc, requiredAlignment); if (alignedPtr != newAlloc) { make_unused_array (newAlloc, alignedPtr - newAlloc); } if (alignedPtr < newAlloc + ComputeMaxStructAlignPadLarge (requiredAlignment)) { make_unused_array (alignedPtr + AlignQword (size), newAlloc + ComputeMaxStructAlignPadLarge (requiredAlignment) - alignedPtr); } return alignedPtr; } #else // FEATURE_STRUCTALIGN #define ComputeMaxStructAlignPad(requiredAlignment) 0 #define ComputeMaxStructAlignPadLarge(requiredAlignment) 0 #endif // FEATURE_STRUCTALIGN //CLR_SIZE is the max amount of bytes from gen0 that is set to 0 in one chunk #ifdef SERVER_GC #define CLR_SIZE ((size_t)(8*1024)) #else //SERVER_GC #define CLR_SIZE ((size_t)(8*1024)) #endif //SERVER_GC #define END_SPACE_AFTER_GC (loh_size_threshold + MAX_STRUCTALIGN) // When we fit into the free list we need an extra of a min obj #define END_SPACE_AFTER_GC_FL (END_SPACE_AFTER_GC + Align (min_obj_size)) #if defined(BACKGROUND_GC) && !defined(USE_REGIONS) #define SEGMENT_INITIAL_COMMIT (2*OS_PAGE_SIZE) #else #define SEGMENT_INITIAL_COMMIT (OS_PAGE_SIZE) #endif //BACKGROUND_GC && !USE_REGIONS // This is always power of 2. const size_t min_segment_size_hard_limit = 1024*1024*16; inline size_t align_on_segment_hard_limit (size_t add) { return ((size_t)(add + (min_segment_size_hard_limit - 1)) & ~(min_segment_size_hard_limit - 1)); } #ifdef SERVER_GC #ifdef HOST_64BIT #define INITIAL_ALLOC ((size_t)((size_t)4*1024*1024*1024)) #define LHEAP_ALLOC ((size_t)(1024*1024*256)) #else #define INITIAL_ALLOC ((size_t)(1024*1024*64)) #define LHEAP_ALLOC ((size_t)(1024*1024*32)) #endif // HOST_64BIT #else //SERVER_GC #ifdef HOST_64BIT #define INITIAL_ALLOC ((size_t)(1024*1024*256)) #define LHEAP_ALLOC ((size_t)(1024*1024*128)) #else #define INITIAL_ALLOC ((size_t)(1024*1024*16)) #define LHEAP_ALLOC ((size_t)(1024*1024*16)) #endif // HOST_64BIT #endif //SERVER_GC const size_t etw_allocation_tick = 100*1024; const size_t low_latency_alloc = 256*1024; const size_t fgn_check_quantum = 2*1024*1024; #ifdef MH_SC_MARK const int max_snoop_level = 128; #endif //MH_SC_MARK #ifdef CARD_BUNDLE //threshold of heap size to turn on card bundles. 
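// SH/MH below appear to be the single-heap (workstation) and multiple-heap (server) thresholds.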
#define SH_TH_CARD_BUNDLE (40*1024*1024) #define MH_TH_CARD_BUNDLE (180*1024*1024) #endif //CARD_BUNDLE // min size to decommit to make the OS call worthwhile #define MIN_DECOMMIT_SIZE (100*OS_PAGE_SIZE) // max size to decommit per millisecond #define DECOMMIT_SIZE_PER_MILLISECOND (160*1024) // time in milliseconds between decommit steps #define DECOMMIT_TIME_STEP_MILLISECONDS (100) inline size_t align_on_page (size_t add) { return ((add + OS_PAGE_SIZE - 1) & ~((size_t)OS_PAGE_SIZE - 1)); } inline uint8_t* align_on_page (uint8_t* add) { return (uint8_t*)align_on_page ((size_t) add); } inline size_t align_lower_page (size_t add) { return (add & ~((size_t)OS_PAGE_SIZE - 1)); } inline uint8_t* align_lower_page (uint8_t* add) { return (uint8_t*)align_lower_page ((size_t)add); } inline size_t align_write_watch_lower_page (size_t add) { return (add & ~(WRITE_WATCH_UNIT_SIZE - 1)); } inline uint8_t* align_write_watch_lower_page (uint8_t* add) { return (uint8_t*)align_lower_page ((size_t)add); } inline BOOL power_of_two_p (size_t integer) { return !(integer & (integer-1)); } inline BOOL oddp (size_t integer) { return (integer & 1) != 0; } // we only ever use this for WORDs. size_t logcount (size_t word) { //counts the number of high bits in a 16 bit word. assert (word < 0x10000); size_t count; count = (word & 0x5555) + ( (word >> 1 ) & 0x5555); count = (count & 0x3333) + ( (count >> 2) & 0x3333); count = (count & 0x0F0F) + ( (count >> 4) & 0x0F0F); count = (count & 0x00FF) + ( (count >> 8) & 0x00FF); return count; } void stomp_write_barrier_resize(bool is_runtime_suspended, bool requires_upper_bounds_check) { WriteBarrierParameters args = {}; args.operation = WriteBarrierOp::StompResize; args.is_runtime_suspended = is_runtime_suspended; args.requires_upper_bounds_check = requires_upper_bounds_check; args.card_table = g_gc_card_table; #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES args.card_bundle_table = g_gc_card_bundle_table; #endif args.lowest_address = g_gc_lowest_address; args.highest_address = g_gc_highest_address; #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP if (SoftwareWriteWatch::IsEnabledForGCHeap()) { args.write_watch_table = g_gc_sw_ww_table; } #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP GCToEEInterface::StompWriteBarrier(&args); } void stomp_write_barrier_ephemeral(uint8_t* ephemeral_low, uint8_t* ephemeral_high) { initGCShadow(); WriteBarrierParameters args = {}; args.operation = WriteBarrierOp::StompEphemeral; args.is_runtime_suspended = true; args.ephemeral_low = ephemeral_low; args.ephemeral_high = ephemeral_high; GCToEEInterface::StompWriteBarrier(&args); } void stomp_write_barrier_initialize(uint8_t* ephemeral_low, uint8_t* ephemeral_high) { WriteBarrierParameters args = {}; args.operation = WriteBarrierOp::Initialize; args.is_runtime_suspended = true; args.requires_upper_bounds_check = false; args.card_table = g_gc_card_table; #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES args.card_bundle_table = g_gc_card_bundle_table; #endif args.lowest_address = g_gc_lowest_address; args.highest_address = g_gc_highest_address; args.ephemeral_low = ephemeral_low; args.ephemeral_high = ephemeral_high; GCToEEInterface::StompWriteBarrier(&args); } //extract the low bits [0,low[ of a uint32_t #define lowbits(wrd, bits) ((wrd) & ((1 << (bits))-1)) //extract the high bits [high, 32] of a uint32_t #define highbits(wrd, bits) ((wrd) & ~((1 << (bits))-1)) // Things we need to manually initialize: // gen0 min_size - based on cache // gen0/1 max_size - based on segment size static 
static_data static_data_table[latency_level_last - latency_level_first + 1][total_generation_count] = { // latency_level_memory_footprint { // gen0 {0, 0, 40000, 0.5f, 9.0f, 20.0f, (1000 * 1000), 1}, // gen1 {160*1024, 0, 80000, 0.5f, 2.0f, 7.0f, (10 * 1000 * 1000), 10}, // gen2 {256*1024, SSIZE_T_MAX, 200000, 0.25f, 1.2f, 1.8f, (100 * 1000 * 1000), 100}, // loh {3*1024*1024, SSIZE_T_MAX, 0, 0.0f, 1.25f, 4.5f, 0, 0}, // poh {3*1024*1024, SSIZE_T_MAX, 0, 0.0f, 1.25f, 4.5f, 0, 0}, }, // latency_level_balanced { // gen0 {0, 0, 40000, 0.5f, #ifdef MULTIPLE_HEAPS 20.0f, 40.0f, #else 9.0f, 20.0f, #endif //MULTIPLE_HEAPS (1000 * 1000), 1}, // gen1 {256*1024, 0, 80000, 0.5f, 2.0f, 7.0f, (10 * 1000 * 1000), 10}, // gen2 {256*1024, SSIZE_T_MAX, 200000, 0.25f, 1.2f, 1.8f, (100 * 1000 * 1000), 100}, // loh {3*1024*1024, SSIZE_T_MAX, 0, 0.0f, 1.25f, 4.5f, 0, 0}, // poh {3*1024*1024, SSIZE_T_MAX, 0, 0.0f, 1.25f, 4.5f, 0, 0} }, }; class mark; class generation; class heap_segment; class CObjectHeader; class dynamic_data; class l_heap; class sorted_table; class c_synchronize; #ifdef FEATURE_PREMORTEM_FINALIZATION static HRESULT AllocateCFinalize(CFinalize **pCFinalize); #endif // FEATURE_PREMORTEM_FINALIZATION uint8_t* tree_search (uint8_t* tree, uint8_t* old_address); #ifdef USE_INTROSORT #define _sort introsort::sort #elif defined(USE_VXSORT) // in this case we have do_vxsort which takes an additional range that // all items to be sorted are contained in // so do not #define _sort #else //USE_INTROSORT #define _sort qsort1 void qsort1(uint8_t** low, uint8_t** high, unsigned int depth); #endif //USE_INTROSORT void* virtual_alloc (size_t size); void* virtual_alloc (size_t size, bool use_large_pages_p, uint16_t numa_node = NUMA_NODE_UNDEFINED); /* per heap static initialization */ #if defined(BACKGROUND_GC) && !defined(MULTIPLE_HEAPS) uint32_t* gc_heap::mark_array; #endif //BACKGROUND_GC && !MULTIPLE_HEAPS uint8_t** gc_heap::g_mark_list; uint8_t** gc_heap::g_mark_list_copy; size_t gc_heap::mark_list_size; bool gc_heap::mark_list_overflow; #ifdef USE_REGIONS uint8_t*** gc_heap::g_mark_list_piece; size_t gc_heap::g_mark_list_piece_size; #endif //USE_REGIONS seg_mapping* seg_mapping_table; #ifdef FEATURE_BASICFREEZE sorted_table* gc_heap::seg_table; #endif //FEATURE_BASICFREEZE #ifdef MULTIPLE_HEAPS GCEvent gc_heap::ee_suspend_event; size_t gc_heap::min_gen0_balance_delta = 0; size_t gc_heap::min_balance_threshold = 0; #endif //MULTIPLE_HEAPS VOLATILE(BOOL) gc_heap::gc_started; #ifdef MULTIPLE_HEAPS GCEvent gc_heap::gc_start_event; bool gc_heap::gc_thread_no_affinitize_p = false; uintptr_t process_mask = 0; int gc_heap::n_heaps; gc_heap** gc_heap::g_heaps; #if !defined(USE_REGIONS) || defined(_DEBUG) size_t* gc_heap::g_promoted; #endif //!USE_REGIONS || _DEBUG #ifdef MH_SC_MARK int* gc_heap::g_mark_stack_busy; #endif //MH_SC_MARK #ifdef BACKGROUND_GC size_t* gc_heap::g_bpromoted; #endif //BACKGROUND_GC BOOL gc_heap::gradual_decommit_in_progress_p = FALSE; size_t gc_heap::max_decommit_step_size = 0; #else //MULTIPLE_HEAPS #if !defined(USE_REGIONS) || defined(_DEBUG) size_t gc_heap::g_promoted; #endif //!USE_REGIONS || _DEBUG #ifdef BACKGROUND_GC size_t gc_heap::g_bpromoted; #endif //BACKGROUND_GC #endif //MULTIPLE_HEAPS size_t gc_heap::card_table_element_layout[total_bookkeeping_elements + 1]; #ifdef USE_REGIONS uint8_t* gc_heap::bookkeeping_covered_start = nullptr; uint8_t* gc_heap::bookkeeping_covered_committed = nullptr; size_t gc_heap::bookkeeping_sizes[total_bookkeeping_elements]; #endif //USE_REGIONS 
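// Per-process gc_heap statics follow: memory reservation/commit accounting, the pending
// trigger reason, the latency level, the active gc_mechanisms settings, and global GC
// timing/history used for ETW and diagnostics.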
size_t gc_heap::reserved_memory = 0; size_t gc_heap::reserved_memory_limit = 0; BOOL gc_heap::g_low_memory_status; static gc_reason gc_trigger_reason = reason_empty; gc_latency_level gc_heap::latency_level = latency_level_default; gc_mechanisms gc_heap::settings; gc_history_global gc_heap::gc_data_global; uint64_t gc_heap::gc_last_ephemeral_decommit_time = 0; CLRCriticalSection gc_heap::check_commit_cs; size_t gc_heap::current_total_committed = 0; size_t gc_heap::committed_by_oh[total_oh_count] = {0, 0, 0, 0}; size_t gc_heap::current_total_committed_bookkeeping = 0; #ifdef FEATURE_EVENT_TRACE bool gc_heap::informational_event_enabled_p = false; uint64_t* gc_heap::gc_time_info = 0; #ifdef BACKGROUND_GC uint64_t* gc_heap::bgc_time_info = 0; #endif //BACKGROUND_GC size_t gc_heap::physical_memory_from_config = 0; size_t gc_heap::gen0_min_budget_from_config = 0; size_t gc_heap::gen0_max_budget_from_config = 0; int gc_heap::high_mem_percent_from_config = 0; bool gc_heap::use_frozen_segments_p = false; bool gc_heap::hard_limit_config_p = false; #ifdef FEATURE_LOH_COMPACTION gc_heap::etw_loh_compact_info* gc_heap::loh_compact_info; #endif //FEATURE_LOH_COMPACTION #endif //FEATURE_EVENT_TRACE #ifdef SHORT_PLUGS double gc_heap::short_plugs_pad_ratio = 0; #endif //SHORT_PLUGS int gc_heap::generation_skip_ratio_threshold = 0; int gc_heap::conserve_mem_setting = 0; uint64_t gc_heap::suspended_start_time = 0; uint64_t gc_heap::end_gc_time = 0; uint64_t gc_heap::total_suspended_time = 0; uint64_t gc_heap::process_start_time = 0; last_recorded_gc_info gc_heap::last_ephemeral_gc_info; last_recorded_gc_info gc_heap::last_full_blocking_gc_info; #ifdef BACKGROUND_GC last_recorded_gc_info gc_heap::last_bgc_info[2]; VOLATILE(bool) gc_heap::is_last_recorded_bgc = false; VOLATILE(int) gc_heap::last_bgc_info_index = 0; #endif //BACKGROUND_GC #if defined(HOST_64BIT) #define MAX_ALLOWED_MEM_LOAD 85 // consider putting this in dynamic data - // we may want different values for workstation // and server GC. 
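// Rough intent of the 64-bit-only knobs in this block: MAX_ALLOWED_MEM_LOAD is
// the physical memory load (in percent) beyond which we stop letting the gen0
// budget grow, and MIN_YOUNGEST_GEN_DESIRED is the floor the youngest
// generation's desired size is not pushed below even under that cap.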
#define MIN_YOUNGEST_GEN_DESIRED (16*1024*1024) size_t gc_heap::youngest_gen_desired_th; #endif //HOST_64BIT uint64_t gc_heap::mem_one_percent = 0; uint32_t gc_heap::high_memory_load_th = 0; uint32_t gc_heap::m_high_memory_load_th; uint32_t gc_heap::v_high_memory_load_th; bool gc_heap::is_restricted_physical_mem; uint64_t gc_heap::total_physical_mem = 0; uint64_t gc_heap::entry_available_physical_mem = 0; size_t gc_heap::heap_hard_limit = 0; size_t gc_heap::heap_hard_limit_oh[total_oh_count - 1] = {0, 0, 0}; #ifdef USE_REGIONS size_t gc_heap::regions_range = 0; #endif //USE_REGIONS bool affinity_config_specified_p = false; #ifdef USE_REGIONS region_allocator global_region_allocator; uint8_t*(*initial_regions)[total_generation_count][2] = nullptr; size_t gc_heap::region_count = 0; #endif //USE_REGIONS #ifdef BACKGROUND_GC GCEvent gc_heap::bgc_start_event; gc_mechanisms gc_heap::saved_bgc_settings; gc_history_global gc_heap::bgc_data_global; GCEvent gc_heap::background_gc_done_event; GCEvent gc_heap::ee_proceed_event; bool gc_heap::gc_can_use_concurrent = false; bool gc_heap::temp_disable_concurrent_p = false; uint32_t gc_heap::cm_in_progress = FALSE; BOOL gc_heap::dont_restart_ee_p = FALSE; BOOL gc_heap::keep_bgc_threads_p = FALSE; GCEvent gc_heap::bgc_threads_sync_event; BOOL gc_heap::do_ephemeral_gc_p = FALSE; BOOL gc_heap::do_concurrent_p = FALSE; size_t gc_heap::ephemeral_fgc_counts[max_generation]; BOOL gc_heap::alloc_wait_event_p = FALSE; VOLATILE(c_gc_state) gc_heap::current_c_gc_state = c_gc_state_free; VOLATILE(BOOL) gc_heap::gc_background_running = FALSE; #endif //BACKGROUND_GC #ifndef MULTIPLE_HEAPS #ifdef SPINLOCK_HISTORY int gc_heap::spinlock_info_index = 0; spinlock_info gc_heap::last_spinlock_info[max_saved_spinlock_info + 8]; #endif //SPINLOCK_HISTORY uint32_t gc_heap::fgn_maxgen_percent = 0; size_t gc_heap::fgn_last_alloc = 0; int gc_heap::generation_skip_ratio = 100; #ifdef FEATURE_CARD_MARKING_STEALING VOLATILE(size_t) gc_heap::n_eph_soh = 0; VOLATILE(size_t) gc_heap::n_gen_soh = 0; VOLATILE(size_t) gc_heap::n_eph_loh = 0; VOLATILE(size_t) gc_heap::n_gen_loh = 0; #endif //FEATURE_CARD_MARKING_STEALING uint64_t gc_heap::loh_alloc_since_cg = 0; BOOL gc_heap::elevation_requested = FALSE; BOOL gc_heap::last_gc_before_oom = FALSE; BOOL gc_heap::sufficient_gen0_space_p = FALSE; #ifdef BACKGROUND_GC uint8_t* gc_heap::background_saved_lowest_address = 0; uint8_t* gc_heap::background_saved_highest_address = 0; uint8_t* gc_heap::next_sweep_obj = 0; uint8_t* gc_heap::current_sweep_pos = 0; #ifdef DOUBLY_LINKED_FL heap_segment* gc_heap::current_sweep_seg = 0; #endif //DOUBLY_LINKED_FL exclusive_sync* gc_heap::bgc_alloc_lock; #endif //BACKGROUND_GC oom_history gc_heap::oom_info; int gc_heap::oomhist_index_per_heap = 0; oom_history gc_heap::oomhist_per_heap[max_oom_history_count]; fgm_history gc_heap::fgm_result; size_t gc_heap::allocated_since_last_gc[gc_oh_num::total_oh_count - 1]; BOOL gc_heap::ro_segments_in_range; #ifndef USE_REGIONS uint8_t* gc_heap::ephemeral_low; uint8_t* gc_heap::ephemeral_high; #endif //!USE_REGIONS uint8_t* gc_heap::lowest_address; uint8_t* gc_heap::highest_address; BOOL gc_heap::ephemeral_promotion; uint8_t* gc_heap::saved_ephemeral_plan_start[ephemeral_generation_count]; size_t gc_heap::saved_ephemeral_plan_start_size[ephemeral_generation_count]; short* gc_heap::brick_table; uint32_t* gc_heap::card_table; #ifdef CARD_BUNDLE uint32_t* gc_heap::card_bundle_table; #endif //CARD_BUNDLE uint8_t* gc_heap::gc_low = 0; uint8_t* gc_heap::gc_high = 0; #ifndef 
USE_REGIONS uint8_t* gc_heap::demotion_low; uint8_t* gc_heap::demotion_high; #endif //!USE_REGIONS BOOL gc_heap::demote_gen1_p = TRUE; uint8_t* gc_heap::last_gen1_pin_end; gen_to_condemn_tuning gc_heap::gen_to_condemn_reasons; size_t gc_heap::etw_allocation_running_amount[gc_oh_num::total_oh_count - 1]; uint64_t gc_heap::total_alloc_bytes_soh = 0; uint64_t gc_heap::total_alloc_bytes_uoh = 0; int gc_heap::gc_policy = 0; size_t gc_heap::allocation_running_time; size_t gc_heap::allocation_running_amount; heap_segment* gc_heap::ephemeral_heap_segment = 0; #ifdef USE_REGIONS #ifdef STRESS_REGIONS OBJECTHANDLE* gc_heap::pinning_handles_for_alloc = 0; int gc_heap::ph_index_per_heap = 0; int gc_heap::pinning_seg_interval = 2; size_t gc_heap::num_gen0_regions = 0; int gc_heap::sip_seg_interval = 0; int gc_heap::sip_seg_maxgen_interval = 0; size_t gc_heap::num_condemned_regions = 0; #endif //STRESS_REGIONS region_free_list gc_heap::free_regions[count_free_region_kinds]; int gc_heap::num_regions_freed_in_sweep = 0; int gc_heap::regions_per_gen[max_generation + 1]; int gc_heap::sip_maxgen_regions_per_gen[max_generation + 1]; heap_segment* gc_heap::reserved_free_regions_sip[max_generation]; int gc_heap::num_sip_regions = 0; size_t gc_heap::end_gen0_region_space = 0; size_t gc_heap::gen0_pinned_free_space = 0; bool gc_heap::gen0_large_chunk_found = false; size_t* gc_heap::survived_per_region = nullptr; size_t* gc_heap::old_card_survived_per_region = nullptr; #endif //USE_REGIONS BOOL gc_heap::blocking_collection = FALSE; heap_segment* gc_heap::freeable_uoh_segment = 0; uint64_t gc_heap::time_bgc_last = 0; size_t gc_heap::mark_stack_tos = 0; size_t gc_heap::mark_stack_bos = 0; size_t gc_heap::mark_stack_array_length = 0; mark* gc_heap::mark_stack_array = 0; #if defined (_DEBUG) && defined (VERIFY_HEAP) BOOL gc_heap::verify_pinned_queue_p = FALSE; #endif //_DEBUG && VERIFY_HEAP uint8_t* gc_heap::oldest_pinned_plug = 0; size_t gc_heap::num_pinned_objects = 0; #ifdef FEATURE_LOH_COMPACTION size_t gc_heap::loh_pinned_queue_tos = 0; size_t gc_heap::loh_pinned_queue_bos = 0; size_t gc_heap::loh_pinned_queue_length = 0; mark* gc_heap::loh_pinned_queue = 0; BOOL gc_heap::loh_compacted_p = FALSE; #endif //FEATURE_LOH_COMPACTION #ifdef BACKGROUND_GC EEThreadId gc_heap::bgc_thread_id; uint8_t* gc_heap::background_written_addresses [array_size+2]; heap_segment* gc_heap::freeable_soh_segment = 0; size_t gc_heap::bgc_overflow_count = 0; size_t gc_heap::bgc_begin_loh_size = 0; size_t gc_heap::end_loh_size = 0; size_t gc_heap::bgc_begin_poh_size = 0; size_t gc_heap::end_poh_size = 0; #ifdef BGC_SERVO_TUNING uint64_t gc_heap::loh_a_no_bgc = 0; uint64_t gc_heap::loh_a_bgc_marking = 0; uint64_t gc_heap::loh_a_bgc_planning = 0; size_t gc_heap::bgc_maxgen_end_fl_size = 0; #endif //BGC_SERVO_TUNING uint32_t gc_heap::bgc_alloc_spin_uoh = 0; size_t gc_heap::bgc_loh_size_increased = 0; size_t gc_heap::bgc_poh_size_increased = 0; size_t gc_heap::background_soh_size_end_mark = 0; size_t gc_heap::background_soh_alloc_count = 0; size_t gc_heap::background_uoh_alloc_count = 0; uint8_t** gc_heap::background_mark_stack_tos = 0; uint8_t** gc_heap::background_mark_stack_array = 0; size_t gc_heap::background_mark_stack_array_length = 0; BOOL gc_heap::processed_eph_overflow_p = FALSE; #ifdef USE_REGIONS BOOL gc_heap::background_overflow_p = FALSE; #else //USE_REGIONS uint8_t* gc_heap::background_min_overflow_address =0; uint8_t* gc_heap::background_max_overflow_address =0; uint8_t* gc_heap::background_min_soh_overflow_address =0; uint8_t* 
gc_heap::background_max_soh_overflow_address =0; heap_segment* gc_heap::saved_overflow_ephemeral_seg = 0; heap_segment* gc_heap::saved_sweep_ephemeral_seg = 0; uint8_t* gc_heap::saved_sweep_ephemeral_start = 0; #endif //USE_REGIONS Thread* gc_heap::bgc_thread = 0; uint8_t** gc_heap::c_mark_list = 0; size_t gc_heap::c_mark_list_length = 0; size_t gc_heap::c_mark_list_index = 0; gc_history_per_heap gc_heap::bgc_data_per_heap; BOOL gc_heap::bgc_thread_running; CLRCriticalSection gc_heap::bgc_threads_timeout_cs; #endif //BACKGROUND_GC uint8_t** gc_heap::mark_list; uint8_t** gc_heap::mark_list_index; uint8_t** gc_heap::mark_list_end; #ifdef SNOOP_STATS snoop_stats_data gc_heap::snoop_stat; #endif //SNOOP_STATS uint8_t* gc_heap::min_overflow_address = MAX_PTR; uint8_t* gc_heap::max_overflow_address = 0; uint8_t* gc_heap::shigh = 0; uint8_t* gc_heap::slow = MAX_PTR; size_t gc_heap::ordered_free_space_indices[MAX_NUM_BUCKETS]; size_t gc_heap::saved_ordered_free_space_indices[MAX_NUM_BUCKETS]; size_t gc_heap::ordered_plug_indices[MAX_NUM_BUCKETS]; size_t gc_heap::saved_ordered_plug_indices[MAX_NUM_BUCKETS]; BOOL gc_heap::ordered_plug_indices_init = FALSE; BOOL gc_heap::use_bestfit = FALSE; uint8_t* gc_heap::bestfit_first_pin = 0; BOOL gc_heap::commit_end_of_seg = FALSE; size_t gc_heap::max_free_space_items = 0; size_t gc_heap::free_space_buckets = 0; size_t gc_heap::free_space_items = 0; int gc_heap::trimmed_free_space_index = 0; size_t gc_heap::total_ephemeral_plugs = 0; seg_free_spaces* gc_heap::bestfit_seg = 0; size_t gc_heap::total_ephemeral_size = 0; #ifdef HEAP_ANALYZE size_t gc_heap::internal_root_array_length = initial_internal_roots; uint8_t** gc_heap::internal_root_array = 0; size_t gc_heap::internal_root_array_index = 0; BOOL gc_heap::heap_analyze_success = TRUE; uint8_t* gc_heap::current_obj = 0; size_t gc_heap::current_obj_size = 0; #endif //HEAP_ANALYZE #ifdef GC_CONFIG_DRIVEN size_t gc_heap::interesting_data_per_gc[max_idp_count]; //size_t gc_heap::interesting_data_per_heap[max_idp_count]; //size_t gc_heap::interesting_mechanisms_per_heap[max_im_count]; #endif //GC_CONFIG_DRIVEN #endif //MULTIPLE_HEAPS no_gc_region_info gc_heap::current_no_gc_region_info; BOOL gc_heap::proceed_with_gc_p = FALSE; GCSpinLock gc_heap::gc_lock; #ifdef BGC_SERVO_TUNING uint64_t gc_heap::total_loh_a_last_bgc = 0; #endif //BGC_SERVO_TUNING size_t gc_heap::eph_gen_starts_size = 0; heap_segment* gc_heap::segment_standby_list; #if defined(USE_REGIONS) region_free_list gc_heap::global_regions_to_decommit[count_free_region_kinds]; region_free_list gc_heap::global_free_huge_regions; #endif //USE_REGIONS bool gc_heap::use_large_pages_p = 0; #ifdef HEAP_BALANCE_INSTRUMENTATION size_t gc_heap::last_gc_end_time_us = 0; #endif //HEAP_BALANCE_INSTRUMENTATION #ifndef USE_REGIONS size_t gc_heap::min_segment_size = 0; size_t gc_heap::min_uoh_segment_size = 0; #endif //!USE_REGIONS size_t gc_heap::min_segment_size_shr = 0; size_t gc_heap::soh_segment_size = 0; size_t gc_heap::segment_info_size = 0; #ifdef GC_CONFIG_DRIVEN size_t gc_heap::compact_or_sweep_gcs[2]; #endif //GC_CONFIG_DRIVEN #ifdef FEATURE_LOH_COMPACTION BOOL gc_heap::loh_compaction_always_p = FALSE; gc_loh_compaction_mode gc_heap::loh_compaction_mode = loh_compaction_default; int gc_heap::loh_pinned_queue_decay = LOH_PIN_DECAY; #endif //FEATURE_LOH_COMPACTION GCEvent gc_heap::full_gc_approach_event; GCEvent gc_heap::full_gc_end_event; uint32_t gc_heap::fgn_loh_percent = 0; #ifdef BACKGROUND_GC BOOL gc_heap::fgn_last_gc_was_concurrent = FALSE; #endif 
//BACKGROUND_GC VOLATILE(bool) gc_heap::full_gc_approach_event_set; bool gc_heap::special_sweep_p = false; size_t gc_heap::full_gc_counts[gc_type_max]; bool gc_heap::maxgen_size_inc_p = false; BOOL gc_heap::should_expand_in_full_gc = FALSE; // Provisional mode related stuff. bool gc_heap::provisional_mode_triggered = false; bool gc_heap::pm_trigger_full_gc = false; size_t gc_heap::provisional_triggered_gc_count = 0; size_t gc_heap::provisional_off_gc_count = 0; size_t gc_heap::num_provisional_triggered = 0; bool gc_heap::pm_stress_on = false; #ifdef HEAP_ANALYZE BOOL gc_heap::heap_analyze_enabled = FALSE; #endif //HEAP_ANALYZE #ifndef MULTIPLE_HEAPS alloc_list gc_heap::loh_alloc_list [NUM_LOH_ALIST-1]; alloc_list gc_heap::gen2_alloc_list[NUM_GEN2_ALIST-1]; alloc_list gc_heap::poh_alloc_list [NUM_POH_ALIST-1]; #ifdef DOUBLY_LINKED_FL // size we removed with no undo; only for recording purpose size_t gc_heap::gen2_removed_no_undo = 0; size_t gc_heap::saved_pinned_plug_index = INVALID_SAVED_PINNED_PLUG_INDEX; #endif //DOUBLY_LINKED_FL #ifdef FEATURE_EVENT_TRACE etw_bucket_info gc_heap::bucket_info[NUM_GEN2_ALIST]; #endif //FEATURE_EVENT_TRACE dynamic_data gc_heap::dynamic_data_table [total_generation_count]; gc_history_per_heap gc_heap::gc_data_per_heap; size_t gc_heap::total_promoted_bytes = 0; size_t gc_heap::finalization_promoted_bytes = 0; size_t gc_heap::maxgen_pinned_compact_before_advance = 0; uint8_t* gc_heap::alloc_allocated = 0; size_t gc_heap::allocation_quantum = CLR_SIZE; GCSpinLock gc_heap::more_space_lock_soh; GCSpinLock gc_heap::more_space_lock_uoh; #ifdef BACKGROUND_GC VOLATILE(int32_t) gc_heap::uoh_alloc_thread_count = 0; #endif //BACKGROUND_GC #ifdef SYNCHRONIZATION_STATS unsigned int gc_heap::good_suspension = 0; unsigned int gc_heap::bad_suspension = 0; uint64_t gc_heap::total_msl_acquire = 0; unsigned int gc_heap::num_msl_acquired = 0; unsigned int gc_heap::num_high_msl_acquire = 0; unsigned int gc_heap::num_low_msl_acquire = 0; #endif //SYNCHRONIZATION_STATS size_t gc_heap::alloc_contexts_used = 0; size_t gc_heap::soh_allocation_no_gc = 0; size_t gc_heap::loh_allocation_no_gc = 0; bool gc_heap::no_gc_oom_p = false; heap_segment* gc_heap::saved_loh_segment_no_gc = 0; #endif //MULTIPLE_HEAPS #ifndef MULTIPLE_HEAPS BOOL gc_heap::gen0_bricks_cleared = FALSE; int gc_heap::gen0_must_clear_bricks = 0; #ifdef FEATURE_PREMORTEM_FINALIZATION CFinalize* gc_heap::finalize_queue = 0; #endif // FEATURE_PREMORTEM_FINALIZATION #ifdef FEATURE_CARD_MARKING_STEALING VOLATILE(uint32_t) gc_heap::card_mark_chunk_index_soh; VOLATILE(bool) gc_heap::card_mark_done_soh; VOLATILE(uint32_t) gc_heap::card_mark_chunk_index_loh; VOLATILE(uint32_t) gc_heap::card_mark_chunk_index_poh; VOLATILE(bool) gc_heap::card_mark_done_uoh; #endif // FEATURE_CARD_MARKING_STEALING generation gc_heap::generation_table [total_generation_count]; size_t gc_heap::interesting_data_per_heap[max_idp_count]; size_t gc_heap::compact_reasons_per_heap[max_compact_reasons_count]; size_t gc_heap::expand_mechanisms_per_heap[max_expand_mechanisms_count]; size_t gc_heap::interesting_mechanism_bits_per_heap[max_gc_mechanism_bits_count]; #endif // MULTIPLE_HEAPS /* end of per heap static initialization */ // budget smoothing size_t gc_heap::smoothed_desired_per_heap[total_generation_count]; /* end of static initialization */ // This is for methods that need to iterate through all SOH heap segments/regions. 
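// With USE_REGIONS every SOH generation keeps its own list of regions, so a walk
// over "all SOH space" has to start at generation 0. In segment mode the
// ephemeral generations live on the ephemeral segment that is reachable from
// max_generation's segment chain, so starting at max_generation already covers
// everything - which is exactly what the two helpers below encode.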
inline int get_start_generation_index() { #ifdef USE_REGIONS return 0; #else return max_generation; #endif //USE_REGIONS } inline int get_stop_generation_index (int condemned_gen_number) { #ifdef USE_REGIONS return 0; #else return condemned_gen_number; #endif //USE_REGIONS } void gen_to_condemn_tuning::print (int heap_num) { #ifdef DT_LOG dprintf (DT_LOG_0, ("condemned reasons (%d %d)", condemn_reasons_gen, condemn_reasons_condition)); dprintf (DT_LOG_0, ("%s", record_condemn_reasons_gen_header)); gc_condemn_reason_gen r_gen; for (int i = 0; i < gcrg_max; i++) { r_gen = (gc_condemn_reason_gen)(i); str_reasons_gen[i * 2] = get_gen_char (get_gen (r_gen)); } dprintf (DT_LOG_0, ("[%2d]%s", heap_num, str_reasons_gen)); dprintf (DT_LOG_0, ("%s", record_condemn_reasons_condition_header)); gc_condemn_reason_condition r_condition; for (int i = 0; i < gcrc_max; i++) { r_condition = (gc_condemn_reason_condition)(i); str_reasons_condition[i * 2] = get_condition_char (get_condition (r_condition)); } dprintf (DT_LOG_0, ("[%2d]%s", heap_num, str_reasons_condition)); #else UNREFERENCED_PARAMETER(heap_num); #endif //DT_LOG } void gc_generation_data::print (int heap_num, int gen_num) { #if defined(SIMPLE_DPRINTF) && defined(DT_LOG) dprintf (DT_LOG_0, ("[%2d]gen%d beg %Id fl %Id fo %Id end %Id fl %Id fo %Id in %Id p %Id np %Id alloc %Id", heap_num, gen_num, size_before, free_list_space_before, free_obj_space_before, size_after, free_list_space_after, free_obj_space_after, in, pinned_surv, npinned_surv, new_allocation)); #else UNREFERENCED_PARAMETER(heap_num); UNREFERENCED_PARAMETER(gen_num); #endif //SIMPLE_DPRINTF && DT_LOG } void gc_history_per_heap::set_mechanism (gc_mechanism_per_heap mechanism_per_heap, uint32_t value) { uint32_t* mechanism = &mechanisms[mechanism_per_heap]; *mechanism = 0; *mechanism |= mechanism_mask; *mechanism |= (1 << value); #ifdef DT_LOG gc_mechanism_descr* descr = &gc_mechanisms_descr[mechanism_per_heap]; dprintf (DT_LOG_0, ("setting %s: %s", descr->name, (descr->descr)[value])); #endif //DT_LOG } void gc_history_per_heap::print() { #if defined(SIMPLE_DPRINTF) && defined(DT_LOG) for (int i = 0; i < (sizeof (gen_data)/sizeof (gc_generation_data)); i++) { gen_data[i].print (heap_index, i); } dprintf (DT_LOG_0, ("fla %Id flr %Id esa %Id ca %Id pa %Id paa %Id, rfle %d, ec %Id", maxgen_size_info.free_list_allocated, maxgen_size_info.free_list_rejected, maxgen_size_info.end_seg_allocated, maxgen_size_info.condemned_allocated, maxgen_size_info.pinned_allocated, maxgen_size_info.pinned_allocated_advance, maxgen_size_info.running_free_list_efficiency, extra_gen0_committed)); int mechanism = 0; gc_mechanism_descr* descr = 0; for (int i = 0; i < max_mechanism_per_heap; i++) { mechanism = get_mechanism ((gc_mechanism_per_heap)i); if (mechanism >= 0) { descr = &gc_mechanisms_descr[(gc_mechanism_per_heap)i]; dprintf (DT_LOG_0, ("[%2d]%s%s", heap_index, descr->name, (descr->descr)[mechanism])); } } #endif //SIMPLE_DPRINTF && DT_LOG } void gc_history_global::print() { #ifdef DT_LOG char str_settings[64]; memset (str_settings, '|', sizeof (char) * 64); str_settings[max_global_mechanisms_count*2] = 0; for (int i = 0; i < max_global_mechanisms_count; i++) { str_settings[i * 2] = (get_mechanism_p ((gc_global_mechanism_p)i) ? 
'Y' : 'N'); } dprintf (DT_LOG_0, ("[hp]|c|p|o|d|b|e|")); dprintf (DT_LOG_0, ("%4d|%s", num_heaps, str_settings)); dprintf (DT_LOG_0, ("Condemned gen%d(reason: %s; mode: %s), youngest budget %Id(%d), memload %d", condemned_generation, str_gc_reasons[reason], str_gc_pause_modes[pause_mode], final_youngest_desired, gen0_reduction_count, mem_pressure)); #endif //DT_LOG } uint32_t limit_time_to_uint32 (uint64_t time) { time = min (time, UINT32_MAX); return (uint32_t)time; } void gc_heap::fire_per_heap_hist_event (gc_history_per_heap* current_gc_data_per_heap, int heap_num) { maxgen_size_increase* maxgen_size_info = &(current_gc_data_per_heap->maxgen_size_info); FIRE_EVENT(GCPerHeapHistory_V3, (void *)(maxgen_size_info->free_list_allocated), (void *)(maxgen_size_info->free_list_rejected), (void *)(maxgen_size_info->end_seg_allocated), (void *)(maxgen_size_info->condemned_allocated), (void *)(maxgen_size_info->pinned_allocated), (void *)(maxgen_size_info->pinned_allocated_advance), maxgen_size_info->running_free_list_efficiency, current_gc_data_per_heap->gen_to_condemn_reasons.get_reasons0(), current_gc_data_per_heap->gen_to_condemn_reasons.get_reasons1(), current_gc_data_per_heap->mechanisms[gc_heap_compact], current_gc_data_per_heap->mechanisms[gc_heap_expand], current_gc_data_per_heap->heap_index, (void *)(current_gc_data_per_heap->extra_gen0_committed), total_generation_count, (uint32_t)(sizeof (gc_generation_data)), (void *)&(current_gc_data_per_heap->gen_data[0])); current_gc_data_per_heap->print(); current_gc_data_per_heap->gen_to_condemn_reasons.print (heap_num); } void gc_heap::fire_pevents() { gc_history_global* current_gc_data_global = get_gc_data_global(); settings.record (current_gc_data_global); current_gc_data_global->print(); #ifdef FEATURE_EVENT_TRACE if (!informational_event_enabled_p) return; uint32_t count_time_info = (settings.concurrent ? max_bgc_time_type : (settings.compaction ? max_compact_time_type : max_sweep_time_type)); #ifdef BACKGROUND_GC uint64_t* time_info = (settings.concurrent ? bgc_time_info : gc_time_info); #else uint64_t* time_info = gc_time_info; #endif //BACKGROUND_GC // We don't want to have to fire the time info as 64-bit integers as there's no need to // so compress them down to 32-bit ones. 
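// Note that time_info_32 below aliases the same buffer as time_info; narrowing
// element i in place is safe because the 32-bit slot being written never
// overtakes the 64-bit slot still to be read, and limit_time_to_uint32 clamps
// any value that would not fit in 32 bits.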
uint32_t* time_info_32 = (uint32_t*)time_info; for (uint32_t i = 0; i < count_time_info; i++) { time_info_32[i] = limit_time_to_uint32 (time_info[i]); } FIRE_EVENT(GCGlobalHeapHistory_V4, current_gc_data_global->final_youngest_desired, current_gc_data_global->num_heaps, current_gc_data_global->condemned_generation, current_gc_data_global->gen0_reduction_count, current_gc_data_global->reason, current_gc_data_global->global_mechanisms_p, current_gc_data_global->pause_mode, current_gc_data_global->mem_pressure, current_gc_data_global->gen_to_condemn_reasons.get_reasons0(), current_gc_data_global->gen_to_condemn_reasons.get_reasons1(), count_time_info, (uint32_t)(sizeof (uint32_t)), (void*)time_info_32); #ifdef MULTIPLE_HEAPS for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; gc_history_per_heap* current_gc_data_per_heap = hp->get_gc_data_per_heap(); fire_per_heap_hist_event (current_gc_data_per_heap, hp->heap_number); } #else gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap(); fire_per_heap_hist_event (current_gc_data_per_heap, heap_number); #endif //MULTIPLE_HEAPS #ifdef FEATURE_LOH_COMPACTION if (!settings.concurrent && settings.loh_compaction) { // Not every heap will compact LOH, the ones that didn't will just have 0s // in its info. FIRE_EVENT(GCLOHCompact, get_num_heaps(), (uint32_t)(sizeof (etw_loh_compact_info)), (void *)loh_compact_info); } #endif //FEATURE_LOH_COMPACTION #endif //FEATURE_EVENT_TRACE } inline BOOL gc_heap::dt_low_ephemeral_space_p (gc_tuning_point tp) { BOOL ret = FALSE; switch (tp) { case tuning_deciding_condemned_gen: #ifndef USE_REGIONS case tuning_deciding_compaction: case tuning_deciding_expansion: #endif //USE_REGIONS case tuning_deciding_full_gc: { ret = (!ephemeral_gen_fit_p (tp)); break; } #ifndef USE_REGIONS case tuning_deciding_promote_ephemeral: { size_t new_gen0size = approximate_new_allocation(); ptrdiff_t plan_ephemeral_size = total_ephemeral_size; dprintf (GTC_LOG, ("h%d: plan eph size is %Id, new gen0 is %Id", heap_number, plan_ephemeral_size, new_gen0size)); // If we were in no_gc_region we could have allocated a larger than normal segment, // and the next seg we allocate will be a normal sized seg so if we can't fit the new // ephemeral generations there, do an ephemeral promotion. 
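// In other words: promote the ephemeral generations if a normal-sized SOH
// segment (minus its segment_info header) could not hold what we plan to keep
// in the ephemeral generations plus the budget a fresh gen0 would ask for.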
ret = ((soh_segment_size - segment_info_size) < (plan_ephemeral_size + new_gen0size)); break; } #endif //USE_REGIONS default: { assert (!"invalid tuning reason"); break; } } return ret; } BOOL gc_heap::dt_high_frag_p (gc_tuning_point tp, int gen_number, BOOL elevate_p) { BOOL ret = FALSE; switch (tp) { case tuning_deciding_condemned_gen: { dynamic_data* dd = dynamic_data_of (gen_number); float fragmentation_burden = 0; if (elevate_p) { ret = (dd_fragmentation (dynamic_data_of (max_generation)) >= dd_max_size(dd)); dprintf (GTC_LOG, ("h%d: frag is %Id, max size is %Id", heap_number, dd_fragmentation (dd), dd_max_size(dd))); } else { #ifndef MULTIPLE_HEAPS if (gen_number == max_generation) { float frag_ratio = (float)(dd_fragmentation (dynamic_data_of (max_generation))) / (float)generation_size (max_generation); if (frag_ratio > 0.65) { dprintf (GTC_LOG, ("g2 FR: %d%%", (int)(frag_ratio*100))); return TRUE; } } #endif //!MULTIPLE_HEAPS size_t fr = generation_unusable_fragmentation (generation_of (gen_number)); ret = (fr > dd_fragmentation_limit(dd)); if (ret) { fragmentation_burden = (float)fr / generation_size (gen_number); ret = (fragmentation_burden > dd_v_fragmentation_burden_limit (dd)); } dprintf (GTC_LOG, ("h%d: gen%d, frag is %Id, alloc effi: %d%%, unusable frag is %Id, ratio is %d", heap_number, gen_number, dd_fragmentation (dd), (int)(100*generation_allocator_efficiency (generation_of (gen_number))), fr, (int)(fragmentation_burden*100))); } break; } default: break; } return ret; } inline BOOL gc_heap::dt_estimate_reclaim_space_p (gc_tuning_point tp, int gen_number) { BOOL ret = FALSE; switch (tp) { case tuning_deciding_condemned_gen: { if (gen_number == max_generation) { size_t est_maxgen_free = estimated_reclaim (gen_number); uint32_t num_heaps = 1; #ifdef MULTIPLE_HEAPS num_heaps = gc_heap::n_heaps; #endif //MULTIPLE_HEAPS size_t min_frag_th = min_reclaim_fragmentation_threshold (num_heaps); dprintf (GTC_LOG, ("h%d, min frag is %Id", heap_number, min_frag_th)); ret = (est_maxgen_free >= min_frag_th); } else { assert (0); } break; } default: break; } return ret; } // DTREVIEW: Right now we only estimate gen2 fragmentation. 
// on 64-bit though we should consider gen1 or even gen0 fragmentation as // well inline BOOL gc_heap::dt_estimate_high_frag_p (gc_tuning_point tp, int gen_number, uint64_t available_mem) { BOOL ret = FALSE; switch (tp) { case tuning_deciding_condemned_gen: { if (gen_number == max_generation) { dynamic_data* dd = dynamic_data_of (gen_number); float est_frag_ratio = 0; if (dd_current_size (dd) == 0) { est_frag_ratio = 1; } else if ((dd_fragmentation (dd) == 0) || (dd_fragmentation (dd) + dd_current_size (dd) == 0)) { est_frag_ratio = 0; } else { est_frag_ratio = (float)dd_fragmentation (dd) / (float)(dd_fragmentation (dd) + dd_current_size (dd)); } size_t est_frag = (dd_fragmentation (dd) + (size_t)((dd_desired_allocation (dd) - dd_new_allocation (dd)) * est_frag_ratio)); dprintf (GTC_LOG, ("h%d: gen%d: current_size is %Id, frag is %Id, est_frag_ratio is %d%%, estimated frag is %Id", heap_number, gen_number, dd_current_size (dd), dd_fragmentation (dd), (int)(est_frag_ratio*100), est_frag)); uint32_t num_heaps = 1; #ifdef MULTIPLE_HEAPS num_heaps = gc_heap::n_heaps; #endif //MULTIPLE_HEAPS uint64_t min_frag_th = min_high_fragmentation_threshold(available_mem, num_heaps); //dprintf (GTC_LOG, ("h%d, min frag is %I64d", heap_number, min_frag_th)); ret = (est_frag >= min_frag_th); } else { assert (0); } break; } default: break; } return ret; } inline BOOL gc_heap::dt_low_card_table_efficiency_p (gc_tuning_point tp) { BOOL ret = FALSE; switch (tp) { case tuning_deciding_condemned_gen: { /* promote into max-generation if the card table has too many * generation faults besides the n -> 0 */ ret = (generation_skip_ratio < generation_skip_ratio_threshold); break; } default: break; } return ret; } inline BOOL gc_heap::dt_high_memory_load_p() { return ((settings.entry_memory_load >= high_memory_load_th) || g_low_memory_status); } inline BOOL in_range_for_segment(uint8_t* add, heap_segment* seg) { return ((add >= heap_segment_mem (seg)) && (add < heap_segment_reserved (seg))); } #ifdef FEATURE_BASICFREEZE // The array we allocate is organized as follows: // 0th element is the address of the last array we allocated. // starting from the 1st element are the segment addresses, that's // what buckets() returns. struct bk { uint8_t* add; size_t val; }; class sorted_table { private: ptrdiff_t size; ptrdiff_t count; bk* slots; bk* buckets() { return (slots + 1); } uint8_t*& last_slot (bk* arr) { return arr[0].add; } bk* old_slots; public: static sorted_table* make_sorted_table (); BOOL insert (uint8_t* add, size_t val);; size_t lookup (uint8_t*& add); void remove (uint8_t* add); void clear (); void delete_sorted_table(); void delete_old_slots(); void enqueue_old_slot(bk* sl); BOOL ensure_space_for_insert(); }; sorted_table* sorted_table::make_sorted_table () { size_t size = 400; // allocate one more bk to store the older slot address. 
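// The single allocation below is the sorted_table header immediately followed by
// (size + 1) bk slots: slots[0].add is reserved as the link to a retired slot
// array (see last_slot / enqueue_old_slot), and buckets() == slots + 1 is where
// the sorted (address, value) pairs actually live - hence the "+ 1".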
sorted_table* res = (sorted_table*)new (nothrow) char [sizeof (sorted_table) + (size + 1) * sizeof (bk)]; if (!res) return 0; res->size = size; res->slots = (bk*)(res + 1); res->old_slots = 0; res->clear(); return res; } void sorted_table::delete_sorted_table() { if (slots != (bk*)(this+1)) { delete slots; } delete_old_slots(); delete this; } void sorted_table::delete_old_slots() { uint8_t* sl = (uint8_t*)old_slots; while (sl) { uint8_t* dsl = sl; sl = last_slot ((bk*)sl); delete dsl; } old_slots = 0; } void sorted_table::enqueue_old_slot(bk* sl) { last_slot (sl) = (uint8_t*)old_slots; old_slots = sl; } inline size_t sorted_table::lookup (uint8_t*& add) { ptrdiff_t high = (count-1); ptrdiff_t low = 0; ptrdiff_t ti; ptrdiff_t mid; bk* buck = buckets(); while (low <= high) { mid = ((low + high)/2); ti = mid; if (buck[ti].add > add) { if ((ti > 0) && (buck[ti-1].add <= add)) { add = buck[ti-1].add; return buck[ti - 1].val; } high = mid - 1; } else { if (buck[ti+1].add > add) { add = buck[ti].add; return buck[ti].val; } low = mid + 1; } } add = 0; return 0; } BOOL sorted_table::ensure_space_for_insert() { if (count == size) { size = (size * 3)/2; assert((size * sizeof (bk)) > 0); bk* res = (bk*)new (nothrow) char [(size + 1) * sizeof (bk)]; assert (res); if (!res) return FALSE; last_slot (res) = 0; memcpy (((bk*)res + 1), buckets(), count * sizeof (bk)); bk* last_old_slots = slots; slots = res; if (last_old_slots != (bk*)(this + 1)) enqueue_old_slot (last_old_slots); } return TRUE; } BOOL sorted_table::insert (uint8_t* add, size_t val) { //grow if no more room assert (count < size); //insert sorted ptrdiff_t high = (count-1); ptrdiff_t low = 0; ptrdiff_t ti; ptrdiff_t mid; bk* buck = buckets(); while (low <= high) { mid = ((low + high)/2); ti = mid; if (buck[ti].add > add) { if ((ti == 0) || (buck[ti-1].add <= add)) { // found insertion point for (ptrdiff_t k = count; k > ti;k--) { buck [k] = buck [k-1]; } buck[ti].add = add; buck[ti].val = val; count++; return TRUE; } high = mid - 1; } else { if (buck[ti+1].add > add) { //found the insertion point for (ptrdiff_t k = count; k > ti+1;k--) { buck [k] = buck [k-1]; } buck[ti+1].add = add; buck[ti+1].val = val; count++; return TRUE; } low = mid + 1; } } assert (0); return TRUE; } void sorted_table::remove (uint8_t* add) { ptrdiff_t high = (count-1); ptrdiff_t low = 0; ptrdiff_t ti; ptrdiff_t mid; bk* buck = buckets(); while (low <= high) { mid = ((low + high)/2); ti = mid; if (buck[ti].add > add) { if (buck[ti-1].add <= add) { for (ptrdiff_t k = ti; k < count; k++) buck[k-1] = buck[k]; count--; return; } high = mid - 1; } else { if (buck[ti+1].add > add) { for (ptrdiff_t k = ti+1; k < count; k++) buck[k-1] = buck[k]; count--; return; } low = mid + 1; } } assert (0); } void sorted_table::clear() { count = 1; buckets()[0].add = MAX_PTR; } #endif //FEATURE_BASICFREEZE #ifdef USE_REGIONS inline size_t get_basic_region_index_for_address (uint8_t* address) { size_t basic_region_index = (size_t)address >> gc_heap::min_segment_size_shr; return (basic_region_index - ((size_t)g_gc_lowest_address >> gc_heap::min_segment_size_shr)); } // Go from a random address to its region info. The random address could be // in one of the basic regions of a larger region so we need to check for that. 
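// For a multi-unit region only the first basic unit's entry is the real
// heap_segment; the trailing units keep a negative offset back to that first
// unit in what would otherwise be their allocated field, which is what the
// (first_field < 0) adjustment below relies on.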
inline
heap_segment* get_region_info_for_address (uint8_t* address)
{
    size_t basic_region_index = (size_t)address >> gc_heap::min_segment_size_shr;
    heap_segment* basic_region_info_entry = (heap_segment*)&seg_mapping_table[basic_region_index];
    ptrdiff_t first_field = (ptrdiff_t)heap_segment_allocated (basic_region_info_entry);
    if (first_field < 0)
    {
        basic_region_index += first_field;
    }

    return ((heap_segment*)(&seg_mapping_table[basic_region_index]));
}

// Go from the physical start of a region to its region info.
inline
heap_segment* get_region_info (uint8_t* region_start)
{
    size_t region_index = (size_t)region_start >> gc_heap::min_segment_size_shr;
    heap_segment* region_info_entry = (heap_segment*)&seg_mapping_table[region_index];
    dprintf (REGIONS_LOG, ("region info for region %Ix is at %Id, %Ix (alloc: %Ix)",
        region_start, region_index, (size_t)region_info_entry, heap_segment_allocated (region_info_entry)));
    return (heap_segment*)&seg_mapping_table[region_index];
}

// Go from the actual region info to its region start.
inline
uint8_t* get_region_start (heap_segment* region_info)
{
    uint8_t* obj_start = heap_segment_mem (region_info);
    return (obj_start - sizeof (aligned_plug_and_gap));
}

inline
size_t get_region_size (heap_segment* region_info)
{
    return (size_t)(heap_segment_reserved (region_info) - get_region_start (region_info));
}

inline
size_t get_region_committed_size (heap_segment* region)
{
    uint8_t* start = get_region_start (region);
    uint8_t* committed = heap_segment_committed (region);
    return committed - start;
}

inline bool is_free_region (heap_segment* region)
{
    return (heap_segment_allocated (region) == nullptr);
}

bool region_allocator::init (uint8_t* start, uint8_t* end, size_t alignment, uint8_t** lowest, uint8_t** highest)
{
    uint8_t* actual_start = start;
    region_alignment = alignment;
    large_region_alignment = LARGE_REGION_FACTOR * alignment;
    global_region_start = (uint8_t*)align_region_up ((size_t)actual_start);
    uint8_t* actual_end = end;
    global_region_end = (uint8_t*)align_region_down ((size_t)actual_end);
    global_region_left_used = global_region_start;
    global_region_right_used = global_region_end;

    // Note: I am allocating a map that covers the whole reserved range.
    // We can optimize it to only cover the current heap range.
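    // The map allocated below holds one uint32_t per basic region unit: a block
    // of N contiguous units records N in both its first and last entry (with
    // region_alloc_free_bit set when the block is free), so blocks can be walked
    // forward from the left end or backward from the right end - see
    // make_busy_block / make_free_block.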
size_t total_num_units = (global_region_end - global_region_start) / region_alignment; total_free_units = (uint32_t)total_num_units; uint32_t* unit_map = new (nothrow) uint32_t[total_num_units]; if (unit_map) { memset (unit_map, 0, sizeof (uint32_t) * total_num_units); region_map_left_start = unit_map; region_map_left_end = region_map_left_start; region_map_right_start = unit_map + total_num_units; region_map_right_end = region_map_right_start; dprintf (REGIONS_LOG, ("start: %Ix, end: %Ix, total %Idmb(alignment: %Idmb), map units %d", (size_t)start, (size_t)end, (size_t)((end - start) / 1024 / 1024), (alignment / 1024 / 1024), total_num_units)); *lowest = global_region_start; *highest = global_region_end; } return (unit_map != 0); } inline uint8_t* region_allocator::region_address_of (uint32_t* map_index) { return (global_region_start + ((map_index - region_map_left_start) * region_alignment)); } inline uint32_t* region_allocator::region_map_index_of (uint8_t* address) { return (region_map_left_start + ((address - global_region_start) / region_alignment)); } void region_allocator::make_busy_block (uint32_t* index_start, uint32_t num_units) { #ifdef _DEBUG dprintf (REGIONS_LOG, ("MBB[B: %Id] %d->%d", (size_t)num_units, (int)(index_start - region_map_left_start), (int)(index_start - region_map_left_start + num_units))); #endif //_DEBUG ASSERT_HOLDING_SPIN_LOCK (&region_allocator_lock); uint32_t* index_end = index_start + (num_units - 1); *index_start = *index_end = num_units; } void region_allocator::make_free_block (uint32_t* index_start, uint32_t num_units) { #ifdef _DEBUG dprintf (REGIONS_LOG, ("MFB[F: %Id] %d->%d", (size_t)num_units, (int)(index_start - region_map_left_start), (int)(index_start - region_map_left_start + num_units))); #endif //_DEBUG ASSERT_HOLDING_SPIN_LOCK (&region_allocator_lock); uint32_t* index_end = index_start + (num_units - 1); *index_start = *index_end = region_alloc_free_bit | num_units; } void region_allocator::print_map (const char* msg) { ASSERT_HOLDING_SPIN_LOCK (&region_allocator_lock); #ifdef _DEBUG const char* heap_type = "UH"; dprintf (REGIONS_LOG, ("[%s]-----printing----%s", heap_type, msg)); uint32_t* current_index = region_map_left_start; uint32_t* end_index = region_map_left_end; uint32_t count_free_units = 0; for (int i = 0; i < 2; i++) { while (current_index < end_index) { uint32_t current_val = *current_index; uint32_t current_num_units = get_num_units (current_val); bool free_p = is_unit_memory_free (current_val); dprintf (REGIONS_LOG, ("[%s][%s: %Id]%d->%d", heap_type, (free_p ? 
"F" : "B"), (size_t)current_num_units, (int)(current_index - region_map_left_start), (int)(current_index - region_map_left_start + current_num_units))); if (free_p) { count_free_units += current_num_units; } current_index += current_num_units; } current_index = region_map_right_start; end_index = region_map_right_end; } count_free_units += (uint32_t)(region_map_right_start - region_map_left_end); assert(count_free_units == total_free_units); uint32_t total_regions = (uint32_t)((global_region_end - global_region_start) / region_alignment); dprintf (REGIONS_LOG, ("[%s]-----end printing----[%d total, left used %d, right used %d]\n", heap_type, total_regions, (region_map_left_end - region_map_left_start), (region_map_right_end - region_map_right_start))); #endif //_DEBUG } uint8_t* region_allocator::allocate_end (uint32_t num_units, allocate_direction direction) { uint8_t* alloc = NULL; ASSERT_HOLDING_SPIN_LOCK (&region_allocator_lock); if (global_region_left_used < global_region_right_used) { size_t end_remaining = global_region_right_used - global_region_left_used; if ((end_remaining / region_alignment) >= num_units) { if (direction == allocate_forward) { make_busy_block (region_map_left_end, num_units); region_map_left_end += num_units; alloc = global_region_left_used; global_region_left_used += num_units * region_alignment; } else { assert(direction == allocate_backward); region_map_right_start -= num_units; make_busy_block (region_map_right_start, num_units); global_region_right_used -= num_units * region_alignment; alloc = global_region_right_used; } } } return alloc; } void region_allocator::enter_spin_lock() { while (true) { if (Interlocked::CompareExchange(&region_allocator_lock.lock, 0, -1) < 0) break; while (region_allocator_lock.lock >= 0) { YieldProcessor(); // indicate to the processor that we are spinning } } #ifdef _DEBUG region_allocator_lock.holding_thread = GCToEEInterface::GetThread(); #endif //_DEBUG } void region_allocator::leave_spin_lock() { #ifdef _DEBUG region_allocator_lock.holding_thread = (Thread*)-1; #endif //_DEBUG region_allocator_lock.lock = -1; } uint8_t* region_allocator::allocate (uint32_t num_units, allocate_direction direction, region_allocator_callback_fn fn) { enter_spin_lock(); uint32_t* current_index; uint32_t* end_index; if (direction == allocate_forward) { current_index = region_map_left_start; end_index = region_map_left_end; } else { assert(direction == allocate_backward); current_index = region_map_right_end; end_index = region_map_right_start; } dprintf (REGIONS_LOG, ("searching %d->%d", (int)(current_index - region_map_left_start), (int)(end_index - region_map_left_start))); print_map ("before alloc"); while (((direction == allocate_forward) && (current_index < end_index)) || ((direction == allocate_backward) && (current_index > end_index))) { uint32_t current_val = *(current_index - ((direction == -1) ? 1 : 0)); uint32_t current_num_units = get_num_units (current_val); bool free_p = is_unit_memory_free (current_val); dprintf (REGIONS_LOG, ("ALLOC[%s: %Id]%d->%d", (free_p ? 
"F" : "B"), (size_t)current_num_units, (int)(current_index - region_map_left_start), (int)(current_index + current_num_units - region_map_left_start))); if (free_p) { if (current_num_units >= num_units) { dprintf (REGIONS_LOG, ("found %Id contiguous free units(%d->%d), sufficient", (size_t)current_num_units, (int)(current_index - region_map_left_start), (int)(current_index - region_map_left_start + current_num_units))); uint32_t* busy_block; uint32_t* free_block; if (direction == 1) { busy_block = current_index; free_block = current_index + num_units; } else { busy_block = current_index - num_units; free_block = current_index - current_num_units; } make_busy_block (busy_block, num_units); if ((current_num_units - num_units) > 0) { make_free_block (free_block, (current_num_units - num_units)); } total_free_units -= num_units; print_map ("alloc: found in free"); leave_spin_lock(); return region_address_of (busy_block); } } if (direction == allocate_forward) { current_index += current_num_units; } else { current_index -= current_num_units; } } uint8_t* alloc = allocate_end (num_units, direction); if (alloc) { total_free_units -= num_units; if (fn != nullptr) { if (!fn (global_region_left_used)) { delete_region_impl (alloc); alloc = nullptr; } } if (alloc) { print_map ("alloc: found at the end"); } } else { dprintf (REGIONS_LOG, ("couldn't find memory at the end! only %Id bytes left", (global_region_right_used - global_region_left_used))); } leave_spin_lock(); return alloc; } // ETW TODO: need to fire create seg events for these methods. // FIRE_EVENT(GCCreateSegment_V1 bool region_allocator::allocate_region (size_t size, uint8_t** start, uint8_t** end, allocate_direction direction, region_allocator_callback_fn fn) { size_t alignment = region_alignment; size_t alloc_size = align_region_up (size); uint32_t num_units = (uint32_t)(alloc_size / alignment); bool ret = false; uint8_t* alloc = NULL; dprintf (REGIONS_LOG, ("----GET %d-----", num_units)); alloc = allocate (num_units, direction, fn); *start = alloc; *end = alloc + alloc_size; ret = (alloc != NULL); return ret; } bool region_allocator::allocate_basic_region (uint8_t** start, uint8_t** end, region_allocator_callback_fn fn) { return allocate_region (region_alignment, start, end, allocate_forward, fn); } // Large regions are 8x basic region sizes by default. If you need a larger region than that, // call allocate_region with the size. 
bool region_allocator::allocate_large_region (uint8_t** start, uint8_t** end, allocate_direction direction, size_t size, region_allocator_callback_fn fn) { if (size == 0) size = large_region_alignment; else { // round up size to a multiple of large_region_alignment // for the below computation to work, large_region_alignment must be a power of 2 assert (round_up_power2(large_region_alignment) == large_region_alignment); size = (size + (large_region_alignment - 1)) & ~(large_region_alignment - 1); } return allocate_region (size, start, end, direction, fn); } void region_allocator::delete_region (uint8_t* region_start) { enter_spin_lock(); delete_region_impl (region_start); leave_spin_lock(); } void region_allocator::delete_region_impl (uint8_t* region_start) { ASSERT_HOLDING_SPIN_LOCK (&region_allocator_lock); assert (is_region_aligned (region_start)); print_map ("before delete"); uint32_t* current_index = region_map_index_of (region_start); uint32_t current_val = *current_index; assert (!is_unit_memory_free (current_val)); dprintf (REGIONS_LOG, ("----DEL %d (%u units)-----", (current_index - region_map_left_start), current_val)); uint32_t* region_end_index = current_index + current_val; uint8_t* region_end = region_address_of (region_end_index); int free_block_size = current_val; uint32_t* free_index = current_index; if ((current_index != region_map_left_start) && (current_index != region_map_right_start)) { uint32_t previous_val = *(current_index - 1); if (is_unit_memory_free(previous_val)) { uint32_t previous_size = get_num_units (previous_val); free_index -= previous_size; free_block_size += previous_size; } } if ((region_end != global_region_left_used) && (region_end != global_region_end)) { uint32_t next_val = *region_end_index; if (is_unit_memory_free(next_val)) { uint32_t next_size = get_num_units (next_val); free_block_size += next_size; region_end += next_size; } } if (region_end == global_region_left_used) { region_map_left_end = free_index; dprintf (REGIONS_LOG, ("adjust global left used from %Ix to %Ix", global_region_left_used, region_address_of (free_index))); global_region_left_used = region_address_of (free_index); } else if (region_start == global_region_right_used) { region_map_right_start = free_index + free_block_size; dprintf (REGIONS_LOG, ("adjust global right used from %Ix to %Ix", global_region_right_used, region_address_of (free_index + free_block_size))); global_region_right_used = region_address_of (free_index + free_block_size); } else { make_free_block (free_index, free_block_size); } total_free_units += current_val; print_map ("after delete"); } void region_allocator::move_highest_free_regions (int64_t n, bool small_region_p, region_free_list to_free_list[count_free_region_kinds]) { assert (n > 0); uint32_t* current_index = region_map_left_end - 1; uint32_t* lowest_index = region_map_left_start; while (current_index >= lowest_index) { uint32_t current_val = *current_index; uint32_t current_num_units = get_num_units (current_val); bool free_p = is_unit_memory_free (current_val); if (!free_p && ((current_num_units == 1) == small_region_p)) { uint32_t* index = current_index - (current_num_units - 1); heap_segment* region = get_region_info (region_address_of (index)); if (is_free_region (region)) { if (n >= current_num_units) { n -= current_num_units; region_free_list::unlink_region (region); region_free_list::add_region (region, to_free_list); } else { break; } } } current_index -= current_num_units; } } #endif //USE_REGIONS inline uint8_t* align_on_segment 
(uint8_t* add) { return (uint8_t*)((size_t)(add + (((size_t)1 << gc_heap::min_segment_size_shr) - 1)) & ~(((size_t)1 << gc_heap::min_segment_size_shr) - 1)); } inline uint8_t* align_lower_segment (uint8_t* add) { return (uint8_t*)((size_t)(add) & ~(((size_t)1 << gc_heap::min_segment_size_shr) - 1)); } size_t size_seg_mapping_table_of (uint8_t* from, uint8_t* end) { from = align_lower_segment (from); end = align_on_segment (end); dprintf (1, ("from: %Ix, end: %Ix, size: %Ix", from, end, sizeof (seg_mapping)*(((size_t)(end - from) >> gc_heap::min_segment_size_shr)))); return sizeof (seg_mapping)*((size_t)(end - from) >> gc_heap::min_segment_size_shr); } inline size_t seg_mapping_word_of (uint8_t* add) { return (size_t)add >> gc_heap::min_segment_size_shr; } #ifdef FEATURE_BASICFREEZE inline size_t ro_seg_begin_index (heap_segment* seg) { #ifdef USE_REGIONS size_t begin_index = (size_t)heap_segment_mem (seg) >> gc_heap::min_segment_size_shr; #else size_t begin_index = (size_t)seg >> gc_heap::min_segment_size_shr; #endif //USE_REGIONS begin_index = max (begin_index, (size_t)g_gc_lowest_address >> gc_heap::min_segment_size_shr); return begin_index; } inline size_t ro_seg_end_index (heap_segment* seg) { size_t end_index = (size_t)(heap_segment_reserved (seg) - 1) >> gc_heap::min_segment_size_shr; end_index = min (end_index, (size_t)g_gc_highest_address >> gc_heap::min_segment_size_shr); return end_index; } void seg_mapping_table_add_ro_segment (heap_segment* seg) { if ((heap_segment_reserved (seg) <= g_gc_lowest_address) || (heap_segment_mem (seg) >= g_gc_highest_address)) return; for (size_t entry_index = ro_seg_begin_index (seg); entry_index <= ro_seg_end_index (seg); entry_index++) { #ifdef USE_REGIONS heap_segment* region = (heap_segment*)&seg_mapping_table[entry_index]; heap_segment_allocated (region) = (uint8_t*)ro_in_entry; #else seg_mapping_table[entry_index].seg1 = (heap_segment*)((size_t)seg_mapping_table[entry_index].seg1 | ro_in_entry); #endif //USE_REGIONS } } void seg_mapping_table_remove_ro_segment (heap_segment* seg) { UNREFERENCED_PARAMETER(seg); #if 0 // POSSIBLE PERF TODO: right now we are not doing anything because we can't simply remove the flag. If it proves // to be a perf problem, we can search in the current ro segs and see if any lands in this range and only // remove the flag if none lands in this range. #endif //0 } heap_segment* ro_segment_lookup (uint8_t* o) { uint8_t* ro_seg_start = o; heap_segment* seg = (heap_segment*)gc_heap::seg_table->lookup (ro_seg_start); if (ro_seg_start && in_range_for_segment (o, seg)) return seg; else return 0; } #endif //FEATURE_BASICFREEZE void gc_heap::seg_mapping_table_add_segment (heap_segment* seg, gc_heap* hp) { #ifndef USE_REGIONS size_t seg_end = (size_t)(heap_segment_reserved (seg) - 1); size_t begin_index = (size_t)seg >> gc_heap::min_segment_size_shr; seg_mapping* begin_entry = &seg_mapping_table[begin_index]; size_t end_index = seg_end >> gc_heap::min_segment_size_shr; seg_mapping* end_entry = &seg_mapping_table[end_index]; dprintf (2, ("adding seg %Ix(%d)-%Ix(%d)", seg, begin_index, heap_segment_reserved (seg), end_index)); dprintf (2, ("before add: begin entry%d: boundary: %Ix; end entry: %d: boundary: %Ix", begin_index, (seg_mapping_table[begin_index].boundary + 1), end_index, (seg_mapping_table[end_index].boundary + 1))); #ifdef MULTIPLE_HEAPS #ifdef SIMPLE_DPRINTF dprintf (2, ("begin %d: h0: %Ix(%d), h1: %Ix(%d); end %d: h0: %Ix(%d), h1: %Ix(%d)", begin_index, (uint8_t*)(begin_entry->h0), (begin_entry->h0 ? 
begin_entry->h0->heap_number : -1), (uint8_t*)(begin_entry->h1), (begin_entry->h1 ? begin_entry->h1->heap_number : -1), end_index, (uint8_t*)(end_entry->h0), (end_entry->h0 ? end_entry->h0->heap_number : -1), (uint8_t*)(end_entry->h1), (end_entry->h1 ? end_entry->h1->heap_number : -1))); #endif //SIMPLE_DPRINTF assert (end_entry->boundary == 0); assert (end_entry->h0 == 0); end_entry->h0 = hp; assert (begin_entry->h1 == 0); begin_entry->h1 = hp; #else UNREFERENCED_PARAMETER(hp); #endif //MULTIPLE_HEAPS end_entry->boundary = (uint8_t*)seg_end; dprintf (2, ("set entry %d seg1 and %d seg0 to %Ix", begin_index, end_index, seg)); assert ((begin_entry->seg1 == 0) || ((size_t)(begin_entry->seg1) == ro_in_entry)); begin_entry->seg1 = (heap_segment*)((size_t)(begin_entry->seg1) | (size_t)seg); end_entry->seg0 = seg; // for every entry inbetween we need to set its heap too. for (size_t entry_index = (begin_index + 1); entry_index <= (end_index - 1); entry_index++) { assert (seg_mapping_table[entry_index].boundary == 0); #ifdef MULTIPLE_HEAPS assert (seg_mapping_table[entry_index].h0 == 0); seg_mapping_table[entry_index].h1 = hp; #endif //MULTIPLE_HEAPS seg_mapping_table[entry_index].seg1 = seg; } dprintf (2, ("after add: begin entry%d: boundary: %Ix; end entry: %d: boundary: %Ix", begin_index, (seg_mapping_table[begin_index].boundary + 1), end_index, (seg_mapping_table[end_index].boundary + 1))); #if defined(MULTIPLE_HEAPS) && defined(SIMPLE_DPRINTF) dprintf (2, ("begin %d: h0: %Ix(%d), h1: %Ix(%d); end: %d h0: %Ix(%d), h1: %Ix(%d)", begin_index, (uint8_t*)(begin_entry->h0), (begin_entry->h0 ? begin_entry->h0->heap_number : -1), (uint8_t*)(begin_entry->h1), (begin_entry->h1 ? begin_entry->h1->heap_number : -1), end_index, (uint8_t*)(end_entry->h0), (end_entry->h0 ? end_entry->h0->heap_number : -1), (uint8_t*)(end_entry->h1), (end_entry->h1 ? end_entry->h1->heap_number : -1))); #endif //MULTIPLE_HEAPS && SIMPLE_DPRINTF #endif //!USE_REGIONS } void gc_heap::seg_mapping_table_remove_segment (heap_segment* seg) { #ifndef USE_REGIONS size_t seg_end = (size_t)(heap_segment_reserved (seg) - 1); size_t begin_index = (size_t)seg >> gc_heap::min_segment_size_shr; seg_mapping* begin_entry = &seg_mapping_table[begin_index]; size_t end_index = seg_end >> gc_heap::min_segment_size_shr; seg_mapping* end_entry = &seg_mapping_table[end_index]; dprintf (2, ("removing seg %Ix(%d)-%Ix(%d)", seg, begin_index, heap_segment_reserved (seg), end_index)); assert (end_entry->boundary == (uint8_t*)seg_end); end_entry->boundary = 0; #ifdef MULTIPLE_HEAPS gc_heap* hp = heap_segment_heap (seg); assert (end_entry->h0 == hp); end_entry->h0 = 0; assert (begin_entry->h1 == hp); begin_entry->h1 = 0; #endif //MULTIPLE_HEAPS assert (begin_entry->seg1 != 0); begin_entry->seg1 = (heap_segment*)((size_t)(begin_entry->seg1) & ro_in_entry); end_entry->seg0 = 0; // for every entry inbetween we need to reset its heap too. 
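// (Each seg_mapping_table entry covers one min_segment_size-aligned chunk of
// address space: seg0/h0 describe the segment that ends inside that chunk,
// seg1/h1 the one that begins inside it, and boundary is the dividing address.
// Interior entries only ever carry seg1/h1, which is all the loop below resets.)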
for (size_t entry_index = (begin_index + 1); entry_index <= (end_index - 1); entry_index++) { assert (seg_mapping_table[entry_index].boundary == 0); #ifdef MULTIPLE_HEAPS assert (seg_mapping_table[entry_index].h0 == 0); assert (seg_mapping_table[entry_index].h1 == hp); seg_mapping_table[entry_index].h1 = 0; #endif //MULTIPLE_HEAPS seg_mapping_table[entry_index].seg1 = 0; } dprintf (2, ("after remove: begin entry%d: boundary: %Ix; end entry: %d: boundary: %Ix", begin_index, (seg_mapping_table[begin_index].boundary + 1), end_index, (seg_mapping_table[end_index].boundary + 1))); #ifdef MULTIPLE_HEAPS dprintf (2, ("begin %d: h0: %Ix, h1: %Ix; end: %d h0: %Ix, h1: %Ix", begin_index, (uint8_t*)(begin_entry->h0), (uint8_t*)(begin_entry->h1), end_index, (uint8_t*)(end_entry->h0), (uint8_t*)(end_entry->h1))); #endif //MULTIPLE_HEAPS #endif //!USE_REGIONS } #ifdef MULTIPLE_HEAPS inline gc_heap* seg_mapping_table_heap_of_worker (uint8_t* o) { size_t index = (size_t)o >> gc_heap::min_segment_size_shr; seg_mapping* entry = &seg_mapping_table[index]; #ifdef USE_REGIONS gc_heap* hp = heap_segment_heap ((heap_segment*)entry); #else gc_heap* hp = ((o > entry->boundary) ? entry->h1 : entry->h0); dprintf (2, ("checking obj %Ix, index is %Id, entry: boundary: %Ix, h0: %Ix, seg0: %Ix, h1: %Ix, seg1: %Ix", o, index, (entry->boundary + 1), (uint8_t*)(entry->h0), (uint8_t*)(entry->seg0), (uint8_t*)(entry->h1), (uint8_t*)(entry->seg1))); #ifdef _DEBUG heap_segment* seg = ((o > entry->boundary) ? entry->seg1 : entry->seg0); #ifdef FEATURE_BASICFREEZE if ((size_t)seg & ro_in_entry) seg = (heap_segment*)((size_t)seg & ~ro_in_entry); #endif //FEATURE_BASICFREEZE #ifdef TRACE_GC if (seg) { if (in_range_for_segment (o, seg)) { dprintf (2, ("obj %Ix belongs to segment %Ix(-%Ix)", o, seg, (uint8_t*)heap_segment_allocated (seg))); } else { dprintf (2, ("found seg %Ix(-%Ix) for obj %Ix, but it's not on the seg", seg, (uint8_t*)heap_segment_allocated (seg), o)); } } else { dprintf (2, ("could not find obj %Ix in any existing segments", o)); } #endif //TRACE_GC #endif //_DEBUG #endif //USE_REGIONS return hp; } gc_heap* seg_mapping_table_heap_of (uint8_t* o) { if ((o < g_gc_lowest_address) || (o >= g_gc_highest_address)) return 0; return seg_mapping_table_heap_of_worker (o); } gc_heap* seg_mapping_table_heap_of_gc (uint8_t* o) { #ifdef FEATURE_BASICFREEZE if ((o < g_gc_lowest_address) || (o >= g_gc_highest_address)) return 0; #endif //FEATURE_BASICFREEZE return seg_mapping_table_heap_of_worker (o); } #endif //MULTIPLE_HEAPS // Only returns a valid seg if we can actually find o on the seg. heap_segment* seg_mapping_table_segment_of (uint8_t* o) { #ifdef FEATURE_BASICFREEZE if ((o < g_gc_lowest_address) || (o >= g_gc_highest_address)) return ro_segment_lookup (o); #endif //FEATURE_BASICFREEZE size_t index = (size_t)o >> gc_heap::min_segment_size_shr; seg_mapping* entry = &seg_mapping_table[index]; #ifdef USE_REGIONS // REGIONS TODO: I think we could simplify this to having the same info for each // basic entry in a large region so we can get it right away instead of having to go // back some entries. ptrdiff_t first_field = (ptrdiff_t)heap_segment_allocated ((heap_segment*)entry); if (first_field == 0) { dprintf (REGIONS_LOG, ("asked for seg for %Ix, in a freed region mem: %Ix, committed %Ix", o, heap_segment_mem ((heap_segment*)entry), heap_segment_committed ((heap_segment*)entry))); return 0; } // Regions are never going to intersect an ro seg, so this can never be ro_in_entry. 
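// first_field is heap_segment_allocated of the basic entry: zero (a freed
// region) was handled above, and a negative value means this basic unit is the
// tail of a larger region and holds the offset back to its first unit.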
assert (first_field != 0); assert (first_field != ro_in_entry); if (first_field < 0) { index += first_field; } heap_segment* seg = (heap_segment*)&seg_mapping_table[index]; #else //USE_REGIONS dprintf (2, ("checking obj %Ix, index is %Id, entry: boundary: %Ix, seg0: %Ix, seg1: %Ix", o, index, (entry->boundary + 1), (uint8_t*)(entry->seg0), (uint8_t*)(entry->seg1))); heap_segment* seg = ((o > entry->boundary) ? entry->seg1 : entry->seg0); #ifdef FEATURE_BASICFREEZE if ((size_t)seg & ro_in_entry) seg = (heap_segment*)((size_t)seg & ~ro_in_entry); #endif //FEATURE_BASICFREEZE #endif //USE_REGIONS if (seg) { if (in_range_for_segment (o, seg)) { dprintf (2, ("obj %Ix belongs to segment %Ix(-%Ix)", o, (uint8_t*)heap_segment_mem(seg), (uint8_t*)heap_segment_reserved(seg))); } else { dprintf (2, ("found seg %Ix(-%Ix) for obj %Ix, but it's not on the seg, setting it to 0", (uint8_t*)heap_segment_mem(seg), (uint8_t*)heap_segment_reserved(seg), o)); seg = 0; } } else { dprintf (2, ("could not find obj %Ix in any existing segments", o)); } #ifdef FEATURE_BASICFREEZE // TODO: This was originally written assuming that the seg_mapping_table would always contain entries for ro // segments whenever the ro segment falls into the [g_gc_lowest_address,g_gc_highest_address) range. I.e., it had an // extra "&& (size_t)(entry->seg1) & ro_in_entry" expression. However, at the moment, grow_brick_card_table does // not correctly go through the ro segments and add them back to the seg_mapping_table when the [lowest,highest) // range changes. We should probably go ahead and modify grow_brick_card_table and put back the // "&& (size_t)(entry->seg1) & ro_in_entry" here. if (!seg) { seg = ro_segment_lookup (o); if (seg && !in_range_for_segment (o, seg)) seg = 0; } #endif //FEATURE_BASICFREEZE return seg; } size_t gcard_of ( uint8_t*); #define GC_MARKED (size_t)0x1 #ifdef DOUBLY_LINKED_FL // This bit indicates that we'll need to set the bgc mark bit for this object during an FGC. // We only do this when we decide to compact. #define BGC_MARKED_BY_FGC (size_t)0x2 #define MAKE_FREE_OBJ_IN_COMPACT (size_t)0x4 #define ALLOWED_SPECIAL_HEADER_BITS (GC_MARKED|BGC_MARKED_BY_FGC|MAKE_FREE_OBJ_IN_COMPACT) #else //DOUBLY_LINKED_FL #define ALLOWED_SPECIAL_HEADER_BITS (GC_MARKED) #endif //!DOUBLY_LINKED_FL #ifdef HOST_64BIT #define SPECIAL_HEADER_BITS (0x7) #else #define SPECIAL_HEADER_BITS (0x3) #endif #define slot(i, j) ((uint8_t**)(i))[(j)+1] #define free_object_base_size (plug_skew + sizeof(ArrayBase)) #define free_list_slot(x) ((uint8_t**)(x))[2] #define free_list_undo(x) ((uint8_t**)(x))[-1] #define UNDO_EMPTY ((uint8_t*)1) #ifdef DOUBLY_LINKED_FL #define free_list_prev(x) ((uint8_t**)(x))[3] #define PREV_EMPTY ((uint8_t*)1) void check_and_clear_in_free_list (uint8_t* o, size_t size) { if (size >= min_free_list) { free_list_prev (o) = PREV_EMPTY; } } // This is used when we need to clear the prev bit for a free object we made because we know // it's not actually a free obj (it's just a temporary thing during allocation). void clear_prev_bit (uint8_t* o, size_t size) { if (size >= min_free_list) { free_list_prev (o) = 0; } } #endif //DOUBLY_LINKED_FL class CObjectHeader : public Object { public: #if defined(FEATURE_REDHAWK) || defined(BUILD_AS_STANDALONE) // The GC expects the following methods that are provided by the Object class in the CLR but not provided // by Redhawk's version of Object. 
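// (Of these, Validate is the deep checker: it sanity-checks the method table,
// the optional 8-byte alignment contract and, under HEAPVERIFY_GC, the object's
// members; ValidateHeap is simply the entry point heap verification calls.)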
uint32_t GetNumComponents() { return ((ArrayBase *)this)->GetNumComponents(); } void Validate(BOOL bDeep=TRUE) { MethodTable * pMT = GetMethodTable(); _ASSERTE(pMT->SanityCheck()); bool noRangeChecks = (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_NO_RANGE_CHECKS) == GCConfig::HEAPVERIFY_NO_RANGE_CHECKS; BOOL fSmallObjectHeapPtr = FALSE, fLargeObjectHeapPtr = FALSE; if (!noRangeChecks) { fSmallObjectHeapPtr = g_theGCHeap->IsHeapPointer(this, TRUE); if (!fSmallObjectHeapPtr) fLargeObjectHeapPtr = g_theGCHeap->IsHeapPointer(this); _ASSERTE(fSmallObjectHeapPtr || fLargeObjectHeapPtr); } #ifdef FEATURE_STRUCTALIGN _ASSERTE(IsStructAligned((uint8_t *)this, GetMethodTable()->GetBaseAlignment())); #endif // FEATURE_STRUCTALIGN #if defined(FEATURE_64BIT_ALIGNMENT) && !defined(FEATURE_REDHAWK) if (pMT->RequiresAlign8()) { _ASSERTE((((size_t)this) & 0x7) == (pMT->IsValueType() ? 4U : 0U)); } #endif // FEATURE_64BIT_ALIGNMENT #ifdef VERIFY_HEAP if (bDeep && (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)) g_theGCHeap->ValidateObjectMember(this); #endif if (fSmallObjectHeapPtr) { #ifdef FEATURE_BASICFREEZE _ASSERTE(!g_theGCHeap->IsLargeObject(this) || g_theGCHeap->IsInFrozenSegment(this)); #else _ASSERTE(!g_theGCHeap->IsLargeObject(this)); #endif } } void ValidateHeap(BOOL bDeep) { Validate(bDeep); } #endif //FEATURE_REDHAWK || BUILD_AS_STANDALONE ///// // // Header Status Information // MethodTable *GetMethodTable() const { return( (MethodTable *) (((size_t) RawGetMethodTable()) & (~SPECIAL_HEADER_BITS))); } void SetMarked() { _ASSERTE(RawGetMethodTable()); RawSetMethodTable((MethodTable *) (((size_t) RawGetMethodTable()) | GC_MARKED)); } BOOL IsMarked() const { return !!(((size_t)RawGetMethodTable()) & GC_MARKED); } void SetPinned() { assert (!(gc_heap::settings.concurrent)); GetHeader()->SetGCBit(); } BOOL IsPinned() const { return !!((((CObjectHeader*)this)->GetHeader()->GetBits()) & BIT_SBLK_GC_RESERVE); } // Now we set more bits should actually only clear the mark bit void ClearMarked() { #ifdef DOUBLY_LINKED_FL RawSetMethodTable ((MethodTable *)(((size_t) RawGetMethodTable()) & (~GC_MARKED))); #else RawSetMethodTable (GetMethodTable()); #endif //DOUBLY_LINKED_FL } #ifdef DOUBLY_LINKED_FL void SetBGCMarkBit() { RawSetMethodTable((MethodTable *) (((size_t) RawGetMethodTable()) | BGC_MARKED_BY_FGC)); } BOOL IsBGCMarkBitSet() const { return !!(((size_t)RawGetMethodTable()) & BGC_MARKED_BY_FGC); } void ClearBGCMarkBit() { RawSetMethodTable((MethodTable *)(((size_t) RawGetMethodTable()) & (~BGC_MARKED_BY_FGC))); } void SetFreeObjInCompactBit() { RawSetMethodTable((MethodTable *) (((size_t) RawGetMethodTable()) | MAKE_FREE_OBJ_IN_COMPACT)); } BOOL IsFreeObjInCompactBitSet() const { return !!(((size_t)RawGetMethodTable()) & MAKE_FREE_OBJ_IN_COMPACT); } void ClearFreeObjInCompactBit() { #ifdef _DEBUG // check this looks like an object, but do NOT validate pointers to other objects // as these may not be valid yet - we are calling this during compact_phase Validate(FALSE); #endif //_DEBUG RawSetMethodTable((MethodTable *)(((size_t) RawGetMethodTable()) & (~MAKE_FREE_OBJ_IN_COMPACT))); } #endif //DOUBLY_LINKED_FL size_t ClearSpecialBits() { size_t special_bits = ((size_t)RawGetMethodTable()) & SPECIAL_HEADER_BITS; if (special_bits != 0) { assert ((special_bits & (~ALLOWED_SPECIAL_HEADER_BITS)) == 0); RawSetMethodTable ((MethodTable*)(((size_t)RawGetMethodTable()) & ~(SPECIAL_HEADER_BITS))); } return special_bits; } void SetSpecialBits (size_t special_bits) { assert ((special_bits & 
(~ALLOWED_SPECIAL_HEADER_BITS)) == 0); if (special_bits != 0) { RawSetMethodTable ((MethodTable*)(((size_t)RawGetMethodTable()) | special_bits)); } } CGCDesc *GetSlotMap () { assert (GetMethodTable()->ContainsPointers()); return CGCDesc::GetCGCDescFromMT(GetMethodTable()); } void SetFree(size_t size) { assert (size >= free_object_base_size); assert (g_gc_pFreeObjectMethodTable->GetBaseSize() == free_object_base_size); assert (g_gc_pFreeObjectMethodTable->RawGetComponentSize() == 1); RawSetMethodTable( g_gc_pFreeObjectMethodTable ); size_t* numComponentsPtr = (size_t*) &((uint8_t*) this)[ArrayBase::GetOffsetOfNumComponents()]; *numComponentsPtr = size - free_object_base_size; #ifdef VERIFY_HEAP //This introduces a bug in the free list management. //((void**) this)[-1] = 0; // clear the sync block, assert (*numComponentsPtr >= 0); if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) { memset (((uint8_t*)this)+sizeof(ArrayBase), 0xcc, *numComponentsPtr); #ifdef DOUBLY_LINKED_FL // However, in this case we can't leave the Next field uncleared because no one will clear it // so it remains 0xcc and that's not good for verification if (*numComponentsPtr > 0) { free_list_slot (this) = 0; } #endif //DOUBLY_LINKED_FL } #endif //VERIFY_HEAP #ifdef DOUBLY_LINKED_FL // For background GC, we need to distinguish between a free object that's not on the free list // and one that is. So we always set its prev to PREV_EMPTY to indicate that it's a free // object that's not on the free list. If it should be on the free list, it will be set to the // appropriate non zero value. check_and_clear_in_free_list ((uint8_t*)this, size); #endif //DOUBLY_LINKED_FL } void UnsetFree() { size_t size = free_object_base_size - plug_skew; // since we only need to clear 2 ptr size, we do it manually PTR_PTR m = (PTR_PTR) this; for (size_t i = 0; i < size / sizeof(PTR_PTR); i++) *(m++) = 0; } BOOL IsFree () const { return (GetMethodTable() == g_gc_pFreeObjectMethodTable); } #ifdef FEATURE_STRUCTALIGN int GetRequiredAlignment () const { return GetMethodTable()->GetRequiredAlignment(); } #endif // FEATURE_STRUCTALIGN BOOL ContainsPointers() const { return GetMethodTable()->ContainsPointers(); } #ifdef COLLECTIBLE_CLASS BOOL Collectible() const { return GetMethodTable()->Collectible(); } FORCEINLINE BOOL ContainsPointersOrCollectible() const { MethodTable *pMethodTable = GetMethodTable(); return (pMethodTable->ContainsPointers() || pMethodTable->Collectible()); } #endif //COLLECTIBLE_CLASS Object* GetObjectBase() const { return (Object*) this; } }; #define header(i) ((CObjectHeader*)(i)) #ifdef DOUBLY_LINKED_FL inline BOOL is_on_free_list (uint8_t* o, size_t size) { if (size >= min_free_list) { if (header(o)->GetMethodTable() == g_gc_pFreeObjectMethodTable) { return (free_list_prev (o) != PREV_EMPTY); } } return FALSE; } inline void set_plug_bgc_mark_bit (uint8_t* node) { header(node)->SetBGCMarkBit(); } inline BOOL is_plug_bgc_mark_bit_set (uint8_t* node) { return header(node)->IsBGCMarkBitSet(); } inline void clear_plug_bgc_mark_bit (uint8_t* node) { header(node)->ClearBGCMarkBit(); } inline void set_free_obj_in_compact_bit (uint8_t* node) { header(node)->SetFreeObjInCompactBit(); } inline BOOL is_free_obj_in_compact_bit_set (uint8_t* node) { return header(node)->IsFreeObjInCompactBitSet(); } inline void clear_free_obj_in_compact_bit (uint8_t* node) { header(node)->ClearFreeObjInCompactBit(); } #endif //DOUBLY_LINKED_FL #ifdef SHORT_PLUGS inline void set_plug_padded (uint8_t* node) { header(node)->SetMarked(); } inline 
void clear_plug_padded (uint8_t* node) { header(node)->ClearMarked(); } inline BOOL is_plug_padded (uint8_t* node) { return header(node)->IsMarked(); } #else //SHORT_PLUGS inline void set_plug_padded (uint8_t* node){} inline void clear_plug_padded (uint8_t* node){} inline BOOL is_plug_padded (uint8_t* node){return FALSE;} #endif //SHORT_PLUGS inline size_t clear_special_bits (uint8_t* node) { return header(node)->ClearSpecialBits(); } inline void set_special_bits (uint8_t* node, size_t special_bits) { header(node)->SetSpecialBits (special_bits); } inline size_t unused_array_size(uint8_t * p) { assert(((CObjectHeader*)p)->IsFree()); size_t* numComponentsPtr = (size_t*)(p + ArrayBase::GetOffsetOfNumComponents()); return free_object_base_size + *numComponentsPtr; } inline heap_segment* heap_segment_non_sip (heap_segment* ns) { #ifdef USE_REGIONS if ((ns == 0) || !heap_segment_swept_in_plan (ns)) { return ns; } else { do { if (heap_segment_swept_in_plan (ns)) { dprintf (REGIONS_LOG, ("region %Ix->%Ix SIP", heap_segment_mem (ns), heap_segment_allocated (ns))); } ns = heap_segment_next (ns); } while ((ns != 0) && heap_segment_swept_in_plan (ns)); return ns; } #else //USE_REGIONS return ns; #endif //USE_REGIONS } inline heap_segment* heap_segment_next_non_sip (heap_segment* seg) { heap_segment* ns = heap_segment_next (seg); #ifdef USE_REGIONS return heap_segment_non_sip (ns); #else return ns; #endif //USE_REGIONS } heap_segment* heap_segment_rw (heap_segment* ns) { if ((ns == 0) || !heap_segment_read_only_p (ns)) { return ns; } else { do { ns = heap_segment_next (ns); } while ((ns != 0) && heap_segment_read_only_p (ns)); return ns; } } //returns the next non ro segment. heap_segment* heap_segment_next_rw (heap_segment* seg) { heap_segment* ns = heap_segment_next (seg); return heap_segment_rw (ns); } // returns the segment before seg. heap_segment* heap_segment_prev_rw (heap_segment* begin, heap_segment* seg) { assert (begin != 0); heap_segment* prev = begin; heap_segment* current = heap_segment_next_rw (begin); while (current && current != seg) { prev = current; current = heap_segment_next_rw (current); } if (current == seg) { return prev; } else { return 0; } } // returns the segment before seg. 
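// (Same linear walk from 'begin' as heap_segment_prev_rw above, except read-only segments are not
// skipped; both return 0 when 'seg' is not found in the list.)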
heap_segment* heap_segment_prev (heap_segment* begin, heap_segment* seg) { assert (begin != 0); heap_segment* prev = begin; heap_segment* current = heap_segment_next (begin); while (current && current != seg) { prev = current; current = heap_segment_next (current); } if (current == seg) { return prev; } else { return 0; } } heap_segment* heap_segment_in_range (heap_segment* ns) { if ((ns == 0) || heap_segment_in_range_p (ns)) { return ns; } else { do { ns = heap_segment_next (ns); } while ((ns != 0) && !heap_segment_in_range_p (ns)); return ns; } } heap_segment* heap_segment_next_in_range (heap_segment* seg) { heap_segment* ns = heap_segment_next (seg); return heap_segment_in_range (ns); } struct imemory_data { uint8_t* memory_base; }; struct numa_reserved_block { uint8_t* memory_base; size_t block_size; numa_reserved_block() : memory_base(nullptr), block_size(0) { } }; struct initial_memory_details { imemory_data *initial_memory; imemory_data *initial_normal_heap; // points into initial_memory_array imemory_data *initial_large_heap; // points into initial_memory_array imemory_data *initial_pinned_heap; // points into initial_memory_array size_t block_size_normal; size_t block_size_large; size_t block_size_pinned; int block_count; // # of blocks in each int current_block_normal; int current_block_large; int current_block_pinned; enum { ALLATONCE = 1, EACH_GENERATION, EACH_BLOCK, ALLATONCE_SEPARATED_POH, EACH_NUMA_NODE }; size_t allocation_pattern; size_t block_size(int i) { switch (i / block_count) { case 0: return block_size_normal; case 1: return block_size_large; case 2: return block_size_pinned; default: __UNREACHABLE(); } }; void* get_initial_memory (int gen, int h_number) { switch (gen) { case soh_gen0: case soh_gen1: case soh_gen2: return initial_normal_heap[h_number].memory_base; case loh_generation: return initial_large_heap[h_number].memory_base; case poh_generation: return initial_pinned_heap[h_number].memory_base; default: __UNREACHABLE(); } }; size_t get_initial_size (int gen) { switch (gen) { case soh_gen0: case soh_gen1: case soh_gen2: return block_size_normal; case loh_generation: return block_size_large; case poh_generation: return block_size_pinned; default: __UNREACHABLE(); } }; int numa_reserved_block_count; numa_reserved_block* numa_reserved_block_table; }; initial_memory_details memory_details; BOOL gc_heap::reserve_initial_memory (size_t normal_size, size_t large_size, size_t pinned_size, int num_heaps, bool use_large_pages_p, bool separated_poh_p, uint16_t* heap_no_to_numa_node) { BOOL reserve_success = FALSE; // should only be called once assert (memory_details.initial_memory == 0); // soh + loh + poh segments * num_heaps memory_details.initial_memory = new (nothrow) imemory_data[num_heaps * (total_generation_count - ephemeral_generation_count)]; if (memory_details.initial_memory == 0) { dprintf (2, ("failed to reserve %Id bytes for imemory_data", num_heaps * (total_generation_count - ephemeral_generation_count) * sizeof (imemory_data))); return FALSE; } memory_details.initial_normal_heap = memory_details.initial_memory; memory_details.initial_large_heap = memory_details.initial_normal_heap + num_heaps; memory_details.initial_pinned_heap = memory_details.initial_large_heap + num_heaps; memory_details.block_size_normal = normal_size; memory_details.block_size_large = large_size; memory_details.block_size_pinned = pinned_size; memory_details.block_count = num_heaps; memory_details.current_block_normal = 0; memory_details.current_block_large = 0; 
memory_details.current_block_pinned = 0; g_gc_lowest_address = MAX_PTR; g_gc_highest_address = 0; if (((size_t)MAX_PTR - large_size) < normal_size) { // we are already overflowing with just one heap. dprintf (2, ("0x%Ix + 0x%Ix already overflow", normal_size, large_size)); return FALSE; } if (((size_t)MAX_PTR / memory_details.block_count) < (normal_size + large_size + pinned_size)) { dprintf (2, ("(0x%Ix + 0x%Ix)*0x%Ix overflow", normal_size, large_size, memory_details.block_count)); return FALSE; } // figure out number of NUMA nodes and allocate additional table for NUMA local reservation memory_details.numa_reserved_block_count = 0; memory_details.numa_reserved_block_table = nullptr; int numa_node_count = 0; if (heap_no_to_numa_node != nullptr) { uint16_t highest_numa_node = 0; // figure out the highest NUMA node for (int heap_no = 0; heap_no < num_heaps; heap_no++) { uint16_t heap_numa_node = heap_no_to_numa_node[heap_no]; highest_numa_node = max (highest_numa_node, heap_numa_node); } assert (highest_numa_node < MAX_SUPPORTED_CPUS); numa_node_count = highest_numa_node + 1; memory_details.numa_reserved_block_count = numa_node_count * (1 + separated_poh_p); memory_details.numa_reserved_block_table = new (nothrow) numa_reserved_block[memory_details.numa_reserved_block_count]; if (memory_details.numa_reserved_block_table == nullptr) { // we couldn't get the memory - continue as if doing the non-NUMA case dprintf(2, ("failed to reserve %Id bytes for numa_reserved_block data", memory_details.numa_reserved_block_count * sizeof(numa_reserved_block))); memory_details.numa_reserved_block_count = 0; } } if (memory_details.numa_reserved_block_table != nullptr) { // figure out how much to reserve on each NUMA node // note this can be very different between NUMA nodes, depending on // which processors our heaps are associated with size_t merged_pinned_size = separated_poh_p ? 
0 : pinned_size; for (int heap_no = 0; heap_no < num_heaps; heap_no++) { uint16_t heap_numa_node = heap_no_to_numa_node[heap_no]; numa_reserved_block * block = &memory_details.numa_reserved_block_table[heap_numa_node]; // add the size required for this heap block->block_size += normal_size + large_size + merged_pinned_size; if (separated_poh_p) { numa_reserved_block* pinned_block = &memory_details.numa_reserved_block_table[numa_node_count + heap_numa_node]; // add the pinned size required for this heap pinned_block->block_size += pinned_size; } } // reserve the appropriate size on each NUMA node bool failure = false; for (int block_index = 0; block_index < memory_details.numa_reserved_block_count; block_index++) { numa_reserved_block * block = &memory_details.numa_reserved_block_table[block_index]; if (block->block_size == 0) continue; int numa_node = block_index % numa_node_count; bool pinned_block = block_index >= numa_node_count; block->memory_base = (uint8_t*)virtual_alloc (block->block_size, use_large_pages_p && !pinned_block, numa_node); if (block->memory_base == nullptr) { dprintf(2, ("failed to reserve %Id bytes for on NUMA node %u", block->block_size, numa_node)); failure = true; break; } else { g_gc_lowest_address = min(g_gc_lowest_address, block->memory_base); g_gc_highest_address = max(g_gc_highest_address, block->memory_base + block->block_size); } } if (failure) { // if we had any failures, undo the work done so far // we will instead use one of the other allocation patterns // we could try to use what we did succeed to reserve, but that gets complicated for (int block_index = 0; block_index < memory_details.numa_reserved_block_count; block_index++) { numa_reserved_block * block = &memory_details.numa_reserved_block_table[block_index]; if (block->memory_base != nullptr) { virtual_free(block->memory_base, block->block_size); block->memory_base = nullptr; } } delete [] memory_details.numa_reserved_block_table; memory_details.numa_reserved_block_table = nullptr; memory_details.numa_reserved_block_count = 0; } else { // for each NUMA node, give out the memory to its heaps for (uint16_t numa_node = 0; numa_node < numa_node_count; numa_node++) { numa_reserved_block * block = &memory_details.numa_reserved_block_table[numa_node]; numa_reserved_block* pinned_block = separated_poh_p ? &memory_details.numa_reserved_block_table[numa_node_count + numa_node] : nullptr; // if the block's size is 0, there can be no heaps on this NUMA node if (block->block_size == 0) { assert((pinned_block == nullptr) || (pinned_block->block_size == 0)); continue; } uint8_t* memory_base = block->memory_base; uint8_t* pinned_memory_base = ((pinned_block == nullptr) ? 
nullptr : pinned_block->memory_base); for (int heap_no = 0; heap_no < num_heaps; heap_no++) { uint16_t heap_numa_node = heap_no_to_numa_node[heap_no]; if (heap_numa_node != numa_node) { // this heap is on another NUMA node continue; } memory_details.initial_normal_heap[heap_no].memory_base = memory_base; memory_base += normal_size; memory_details.initial_large_heap[heap_no].memory_base = memory_base; memory_base += large_size; if (separated_poh_p) { memory_details.initial_pinned_heap[heap_no].memory_base = pinned_memory_base; pinned_memory_base += pinned_size; } else { memory_details.initial_pinned_heap[heap_no].memory_base = memory_base; memory_base += pinned_size; } } // sanity check - we should be at the end of the memory block for this NUMA node assert (memory_base == block->memory_base + block->block_size); assert ((pinned_block == nullptr) || (pinned_memory_base == pinned_block->memory_base + pinned_block->block_size)); } memory_details.allocation_pattern = initial_memory_details::EACH_NUMA_NODE; reserve_success = TRUE; } } if (!reserve_success) { size_t temp_pinned_size = (separated_poh_p ? 0 : pinned_size); size_t separate_pinned_size = memory_details.block_count * pinned_size; size_t requestedMemory = memory_details.block_count * (normal_size + large_size + temp_pinned_size); uint8_t* allatonce_block = (uint8_t*)virtual_alloc(requestedMemory, use_large_pages_p); uint8_t* separated_poh_block = nullptr; if (allatonce_block && separated_poh_p) { separated_poh_block = (uint8_t*)virtual_alloc(separate_pinned_size, false); if (!separated_poh_block) { virtual_free(allatonce_block, requestedMemory); allatonce_block = nullptr; } } if (allatonce_block) { if (separated_poh_p) { g_gc_lowest_address = min(allatonce_block, separated_poh_block); g_gc_highest_address = max((allatonce_block + requestedMemory), (separated_poh_block + separate_pinned_size)); memory_details.allocation_pattern = initial_memory_details::ALLATONCE_SEPARATED_POH; } else { g_gc_lowest_address = allatonce_block; g_gc_highest_address = allatonce_block + requestedMemory; memory_details.allocation_pattern = initial_memory_details::ALLATONCE; } for (int i = 0; i < memory_details.block_count; i++) { memory_details.initial_normal_heap[i].memory_base = allatonce_block + (i * normal_size); memory_details.initial_large_heap[i].memory_base = allatonce_block + (memory_details.block_count * normal_size) + (i * large_size); if (separated_poh_p) { memory_details.initial_pinned_heap[i].memory_base = separated_poh_block + (i * pinned_size); } else { memory_details.initial_pinned_heap[i].memory_base = allatonce_block + (memory_details.block_count * (normal_size + large_size)) + (i * pinned_size); } } reserve_success = TRUE; } else { // try to allocate 3 blocks uint8_t* b1 = (uint8_t*)virtual_alloc(memory_details.block_count * normal_size, use_large_pages_p); uint8_t* b2 = (uint8_t*)virtual_alloc(memory_details.block_count * large_size, use_large_pages_p); uint8_t* b3 = (uint8_t*)virtual_alloc(memory_details.block_count * pinned_size, use_large_pages_p && !separated_poh_p); if (b1 && b2 && b3) { memory_details.allocation_pattern = initial_memory_details::EACH_GENERATION; g_gc_lowest_address = min(b1, min(b2, b3)); g_gc_highest_address = max(b1 + memory_details.block_count * normal_size, max(b2 + memory_details.block_count * large_size, b3 + memory_details.block_count * pinned_size)); for (int i = 0; i < memory_details.block_count; i++) { memory_details.initial_normal_heap[i].memory_base = b1 + (i * normal_size); 
memory_details.initial_large_heap[i].memory_base = b2 + (i * large_size); memory_details.initial_pinned_heap[i].memory_base = b3 + (i * pinned_size); } reserve_success = TRUE; } else { // allocation failed, we'll go on to try allocating each block. // We could preserve the b1 alloc, but code complexity increases if (b1) virtual_free(b1, memory_details.block_count * normal_size); if (b2) virtual_free(b2, memory_details.block_count * large_size); if (b3) virtual_free(b3, memory_details.block_count * pinned_size); } if ((b2 == NULL) && (memory_details.block_count > 1)) { memory_details.allocation_pattern = initial_memory_details::EACH_BLOCK; imemory_data* current_block = memory_details.initial_memory; for (int i = 0; i < (memory_details.block_count * (total_generation_count - ephemeral_generation_count)); i++, current_block++) { size_t block_size = memory_details.block_size(i); uint16_t numa_node = NUMA_NODE_UNDEFINED; if (heap_no_to_numa_node != nullptr) { int heap_no = i % memory_details.block_count; numa_node = heap_no_to_numa_node[heap_no]; } current_block->memory_base = (uint8_t*)virtual_alloc(block_size, use_large_pages_p, numa_node); if (current_block->memory_base == 0) { // Free the blocks that we've allocated so far current_block = memory_details.initial_memory; for (int j = 0; j < i; j++, current_block++) { if (current_block->memory_base != 0) { block_size = memory_details.block_size(i); virtual_free(current_block->memory_base, block_size); } } reserve_success = FALSE; break; } else { if (current_block->memory_base < g_gc_lowest_address) g_gc_lowest_address = current_block->memory_base; if (((uint8_t*)current_block->memory_base + block_size) > g_gc_highest_address) g_gc_highest_address = (current_block->memory_base + block_size); } reserve_success = TRUE; } } } } if (reserve_success && separated_poh_p) { for (int heap_no = 0; (reserve_success && (heap_no < num_heaps)); heap_no++) { if (!GCToOSInterface::VirtualCommit(memory_details.initial_pinned_heap[heap_no].memory_base, pinned_size)) { reserve_success = FALSE; } } } return reserve_success; } void gc_heap::destroy_initial_memory() { if (memory_details.initial_memory != NULL) { switch (memory_details.allocation_pattern) { case initial_memory_details::ALLATONCE: virtual_free (memory_details.initial_memory[0].memory_base, memory_details.block_count*(memory_details.block_size_normal + memory_details.block_size_large + memory_details.block_size_pinned)); break; case initial_memory_details::ALLATONCE_SEPARATED_POH: virtual_free(memory_details.initial_memory[0].memory_base, memory_details.block_count * (memory_details.block_size_normal + memory_details.block_size_large)); virtual_free(memory_details.initial_pinned_heap[0].memory_base, memory_details.block_count * (memory_details.block_size_pinned)); break; case initial_memory_details::EACH_GENERATION: virtual_free (memory_details.initial_normal_heap[0].memory_base, memory_details.block_count*memory_details.block_size_normal); virtual_free (memory_details.initial_large_heap[0].memory_base, memory_details.block_count*memory_details.block_size_large); virtual_free (memory_details.initial_pinned_heap[0].memory_base, memory_details.block_count*memory_details.block_size_pinned); break; case initial_memory_details::EACH_BLOCK: { imemory_data* current_block = memory_details.initial_memory; int total_block_count = memory_details.block_count * (total_generation_count - ephemeral_generation_count); for (int i = 0; i < total_block_count; i++, current_block++) { size_t block_size = 
memory_details.block_size (i); if (current_block->memory_base != NULL) { virtual_free (current_block->memory_base, block_size); } } break; } case initial_memory_details::EACH_NUMA_NODE: for (int block_index = 0; block_index < memory_details.numa_reserved_block_count; block_index++) { numa_reserved_block * block = &memory_details.numa_reserved_block_table[block_index]; if (block->memory_base != nullptr) { virtual_free (block->memory_base, block->block_size); } } delete [] memory_details.numa_reserved_block_table; break; default: assert (!"unexpected allocation_pattern"); break; } delete [] memory_details.initial_memory; memory_details.initial_memory = NULL; memory_details.initial_normal_heap = NULL; memory_details.initial_large_heap = NULL; memory_details.initial_pinned_heap = NULL; } } heap_segment* make_initial_segment (int gen, int h_number, gc_heap* hp) { void* mem = memory_details.get_initial_memory (gen, h_number); size_t size = memory_details.get_initial_size (gen); heap_segment* res = gc_heap::make_heap_segment ((uint8_t*)mem, size, hp, gen); return res; } void* virtual_alloc (size_t size) { return virtual_alloc(size, false); } void* virtual_alloc (size_t size, bool use_large_pages_p, uint16_t numa_node) { size_t requested_size = size; if ((gc_heap::reserved_memory_limit - gc_heap::reserved_memory) < requested_size) { gc_heap::reserved_memory_limit = GCScan::AskForMoreReservedMemory (gc_heap::reserved_memory_limit, requested_size); if ((gc_heap::reserved_memory_limit - gc_heap::reserved_memory) < requested_size) { return 0; } } uint32_t flags = VirtualReserveFlags::None; #ifndef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP if (virtual_alloc_hardware_write_watch) { flags = VirtualReserveFlags::WriteWatch; } #endif // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP void* prgmem = use_large_pages_p ? GCToOSInterface::VirtualReserveAndCommitLargePages(requested_size, numa_node) : GCToOSInterface::VirtualReserve(requested_size, card_size * card_word_width, flags, numa_node); void *aligned_mem = prgmem; // We don't want (prgmem + size) to be right at the end of the address space // because we'd have to worry about that everytime we do (address + size). // We also want to make sure that we leave loh_size_threshold at the end // so we allocate a small object we don't need to worry about overflow there // when we do alloc_ptr+size. 
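// The check below enforces that: if the reserved range wraps to 0 or ends within END_SPACE_AFTER_GC
// of the top of the address space, the reservation is released and treated as a failed allocation
// rather than risking overflow in later pointer arithmetic.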
if (prgmem) { uint8_t* end_mem = (uint8_t*)prgmem + requested_size; if ((end_mem == 0) || ((size_t)(MAX_PTR - end_mem) <= END_SPACE_AFTER_GC)) { GCToOSInterface::VirtualRelease (prgmem, requested_size); dprintf (2, ("Virtual Alloc size %Id returned memory right against 4GB [%Ix, %Ix[ - discarding", requested_size, (size_t)prgmem, (size_t)((uint8_t*)prgmem+requested_size))); prgmem = 0; aligned_mem = 0; } } if (prgmem) { gc_heap::reserved_memory += requested_size; } dprintf (2, ("Virtual Alloc size %Id: [%Ix, %Ix[", requested_size, (size_t)prgmem, (size_t)((uint8_t*)prgmem+requested_size))); return aligned_mem; } static size_t get_valid_segment_size (BOOL large_seg=FALSE) { size_t seg_size, initial_seg_size; if (!large_seg) { initial_seg_size = INITIAL_ALLOC; seg_size = static_cast<size_t>(GCConfig::GetSegmentSize()); } else { initial_seg_size = LHEAP_ALLOC; seg_size = static_cast<size_t>(GCConfig::GetSegmentSize()) / 2; } #ifdef MULTIPLE_HEAPS #ifdef HOST_64BIT if (!large_seg) #endif // HOST_64BIT { if (g_num_processors > 4) initial_seg_size /= 2; if (g_num_processors > 8) initial_seg_size /= 2; } #endif //MULTIPLE_HEAPS // if seg_size is small but not 0 (0 is default if config not set) // then set the segment to the minimum size if (!g_theGCHeap->IsValidSegmentSize(seg_size)) { // if requested size is between 1 byte and 4MB, use min if ((seg_size >> 1) && !(seg_size >> 22)) seg_size = 1024*1024*4; else seg_size = initial_seg_size; } #ifdef HOST_64BIT seg_size = round_up_power2 (seg_size); #else seg_size = round_down_power2 (seg_size); #endif // HOST_64BIT return (seg_size); } #ifndef USE_REGIONS void gc_heap::compute_new_ephemeral_size() { int eph_gen_max = max_generation - 1 - (settings.promotion ? 1 : 0); size_t padding_size = 0; for (int i = 0; i <= eph_gen_max; i++) { dynamic_data* dd = dynamic_data_of (i); total_ephemeral_size += (dd_survived_size (dd) - dd_pinned_survived_size (dd)); #ifdef RESPECT_LARGE_ALIGNMENT total_ephemeral_size += dd_num_npinned_plugs (dd) * switch_alignment_size (FALSE); #endif //RESPECT_LARGE_ALIGNMENT #ifdef FEATURE_STRUCTALIGN total_ephemeral_size += dd_num_npinned_plugs (dd) * MAX_STRUCTALIGN; #endif //FEATURE_STRUCTALIGN #ifdef SHORT_PLUGS padding_size += dd_padding_size (dd); #endif //SHORT_PLUGS } total_ephemeral_size += eph_gen_starts_size; #ifdef RESPECT_LARGE_ALIGNMENT size_t planned_ephemeral_size = heap_segment_plan_allocated (ephemeral_heap_segment) - generation_plan_allocation_start (generation_of (max_generation-1)); total_ephemeral_size = min (total_ephemeral_size, planned_ephemeral_size); #endif //RESPECT_LARGE_ALIGNMENT #ifdef SHORT_PLUGS total_ephemeral_size = Align ((size_t)((double)total_ephemeral_size * short_plugs_pad_ratio) + 1); total_ephemeral_size += Align (DESIRED_PLUG_LENGTH); #endif //SHORT_PLUGS dprintf (3, ("total ephemeral size is %Ix, padding %Ix(%Ix)", total_ephemeral_size, padding_size, (total_ephemeral_size - padding_size))); } heap_segment* gc_heap::soh_get_segment_to_expand() { size_t size = soh_segment_size; ordered_plug_indices_init = FALSE; use_bestfit = FALSE; //compute the size of the new ephemeral heap segment. compute_new_ephemeral_size(); if ((settings.pause_mode != pause_low_latency) && (settings.pause_mode != pause_no_gc) #ifdef BACKGROUND_GC && (!gc_heap::background_running_p()) #endif //BACKGROUND_GC ) { assert (settings.condemned_generation <= max_generation); allocator* gen_alloc = ((settings.condemned_generation == max_generation) ? 
nullptr : generation_allocator (generation_of (max_generation))); dprintf (2, ("(gen%d)soh_get_segment_to_expand", settings.condemned_generation)); // try to find one in the gen 2 segment list, search backwards because the first segments // tend to be more compact than the later ones. heap_segment* fseg = heap_segment_rw (generation_start_segment (generation_of (max_generation))); PREFIX_ASSUME(fseg != NULL); #ifdef SEG_REUSE_STATS int try_reuse = 0; #endif //SEG_REUSE_STATS heap_segment* seg = ephemeral_heap_segment; while ((seg = heap_segment_prev_rw (fseg, seg)) && (seg != fseg)) { #ifdef SEG_REUSE_STATS try_reuse++; #endif //SEG_REUSE_STATS if (can_expand_into_p (seg, size/3, total_ephemeral_size, gen_alloc)) { get_gc_data_per_heap()->set_mechanism (gc_heap_expand, (use_bestfit ? expand_reuse_bestfit : expand_reuse_normal)); if (settings.condemned_generation == max_generation) { if (use_bestfit) { build_ordered_free_spaces (seg); dprintf (GTC_LOG, ("can use best fit")); } #ifdef SEG_REUSE_STATS dprintf (SEG_REUSE_LOG_0, ("(gen%d)soh_get_segment_to_expand: found seg #%d to reuse", settings.condemned_generation, try_reuse)); #endif //SEG_REUSE_STATS dprintf (GTC_LOG, ("max_gen: Found existing segment to expand into %Ix", (size_t)seg)); return seg; } else { #ifdef SEG_REUSE_STATS dprintf (SEG_REUSE_LOG_0, ("(gen%d)soh_get_segment_to_expand: found seg #%d to reuse - returning", settings.condemned_generation, try_reuse)); #endif //SEG_REUSE_STATS dprintf (GTC_LOG, ("max_gen-1: Found existing segment to expand into %Ix", (size_t)seg)); // If we return 0 here, the allocator will think since we are short on end // of seg we need to trigger a full compacting GC. So if sustained low latency // is set we should acquire a new seg instead, that way we wouldn't be short. // The real solution, of course, is to actually implement seg reuse in gen1. if (settings.pause_mode != pause_sustained_low_latency) { dprintf (GTC_LOG, ("max_gen-1: SustainedLowLatency is set, acquire a new seg")); get_gc_data_per_heap()->set_mechanism (gc_heap_expand, expand_next_full_gc); return 0; } } } } } heap_segment* result = get_segment (size, gc_oh_num::soh); if(result) { #ifdef BACKGROUND_GC if (current_c_gc_state == c_gc_state_planning) { // When we expand heap during bgc sweep, we set the seg to be swept so // we'll always look at cards for objects on the new segment. result->flags |= heap_segment_flags_swept; } #endif //BACKGROUND_GC FIRE_EVENT(GCCreateSegment_V1, heap_segment_mem(result), (size_t)(heap_segment_reserved (result) - heap_segment_mem(result)), gc_etw_segment_small_object_heap); } get_gc_data_per_heap()->set_mechanism (gc_heap_expand, (result ? 
expand_new_seg : expand_no_memory)); if (result == 0) { dprintf (2, ("h%d: failed to allocate a new segment!", heap_number)); } else { #ifdef MULTIPLE_HEAPS heap_segment_heap (result) = this; #endif //MULTIPLE_HEAPS } dprintf (GTC_LOG, ("(gen%d)creating new segment %Ix", settings.condemned_generation, result)); return result; } #endif //!USE_REGIONS //returns 0 in case of allocation failure heap_segment* gc_heap::get_segment (size_t size, gc_oh_num oh) { assert(oh != gc_oh_num::none); BOOL uoh_p = (oh == gc_oh_num::loh) || (oh == gc_oh_num::poh); if (heap_hard_limit) return NULL; heap_segment* result = 0; if (segment_standby_list != 0) { result = segment_standby_list; heap_segment* last = 0; while (result) { size_t hs = (size_t)(heap_segment_reserved (result) - (uint8_t*)result); if ((hs >= size) && ((hs / 2) < size)) { dprintf (2, ("Hoarded segment %Ix found", (size_t) result)); if (last) { heap_segment_next (last) = heap_segment_next (result); } else { segment_standby_list = heap_segment_next (result); } break; } else { last = result; result = heap_segment_next (result); } } } if (result) { init_heap_segment (result, __this #ifdef USE_REGIONS , 0, size, (uoh_p ? max_generation : 0) #endif //USE_REGIONS ); #ifdef BACKGROUND_GC if (is_bgc_in_progress()) { dprintf (GC_TABLE_LOG, ("hoarded seg %Ix, mark_array is %Ix", result, mark_array)); if (!commit_mark_array_new_seg (__this, result)) { dprintf (GC_TABLE_LOG, ("failed to commit mark array for hoarded seg")); // If we can't use it we need to thread it back. if (segment_standby_list != 0) { heap_segment_next (result) = segment_standby_list; segment_standby_list = result; } else { segment_standby_list = result; } result = 0; } } #endif //BACKGROUND_GC if (result) seg_mapping_table_add_segment (result, __this); } if (!result) { void* mem = virtual_alloc (size); if (!mem) { fgm_result.set_fgm (fgm_reserve_segment, size, uoh_p); return 0; } result = make_heap_segment ((uint8_t*)mem, size, __this, (uoh_p ? 
max_generation : 0)); if (result) { uint8_t* start; uint8_t* end; if (mem < g_gc_lowest_address) { start = (uint8_t*)mem; } else { start = (uint8_t*)g_gc_lowest_address; } if (((uint8_t*)mem + size) > g_gc_highest_address) { end = (uint8_t*)mem + size; } else { end = (uint8_t*)g_gc_highest_address; } if (gc_heap::grow_brick_card_tables (start, end, size, result, __this, uoh_p) != 0) { virtual_free (mem, size); return 0; } } else { fgm_result.set_fgm (fgm_commit_segment_beg, SEGMENT_INITIAL_COMMIT, uoh_p); virtual_free (mem, size); } if (result) { seg_mapping_table_add_segment (result, __this); } } #ifdef BACKGROUND_GC if (result) { ::record_changed_seg ((uint8_t*)result, heap_segment_reserved (result), settings.gc_index, current_bgc_state, seg_added); bgc_verify_mark_array_cleared (result); } #endif //BACKGROUND_GC dprintf (GC_TABLE_LOG, ("h%d: new seg: %Ix-%Ix (%Id)", heap_number, result, ((uint8_t*)result + size), size)); return result; } void gc_heap::release_segment (heap_segment* sg) { ptrdiff_t delta = 0; FIRE_EVENT(GCFreeSegment_V1, heap_segment_mem(sg)); virtual_free (sg, (uint8_t*)heap_segment_reserved (sg)-(uint8_t*)sg, sg); } heap_segment* gc_heap::get_segment_for_uoh (int gen_number, size_t size #ifdef MULTIPLE_HEAPS , gc_heap* hp #endif //MULTIPLE_HEAPS ) { #ifndef MULTIPLE_HEAPS gc_heap* hp = 0; #endif //MULTIPLE_HEAPS #ifdef USE_REGIONS heap_segment* res = hp->get_new_region (gen_number, size); #else //USE_REGIONS gc_oh_num oh = gen_to_oh (gen_number); heap_segment* res = hp->get_segment (size, oh); #endif //USE_REGIONS if (res != 0) { #ifdef MULTIPLE_HEAPS heap_segment_heap (res) = hp; #endif //MULTIPLE_HEAPS size_t flags = (gen_number == poh_generation) ? heap_segment_flags_poh : heap_segment_flags_loh; #ifdef USE_REGIONS // in the regions case, flags are set by get_new_region assert ((res->flags & (heap_segment_flags_loh | heap_segment_flags_poh)) == flags); #else //USE_REGIONS res->flags |= flags; #endif //USE_REGIONS FIRE_EVENT(GCCreateSegment_V1, heap_segment_mem(res), (size_t)(heap_segment_reserved (res) - heap_segment_mem(res)), (gen_number == poh_generation) ? gc_etw_segment_pinned_object_heap : gc_etw_segment_large_object_heap); #ifndef USE_REGIONS #ifdef MULTIPLE_HEAPS hp->thread_uoh_segment (gen_number, res); #else thread_uoh_segment (gen_number, res); #endif //MULTIPLE_HEAPS #endif //!USE_REGIONS GCToEEInterface::DiagAddNewRegion( gen_number, heap_segment_mem (res), heap_segment_allocated (res), heap_segment_reserved (res) ); } return res; } void gc_heap::thread_uoh_segment (int gen_number, heap_segment* new_seg) { heap_segment* seg = generation_allocation_segment (generation_of (gen_number)); while (heap_segment_next_rw (seg)) seg = heap_segment_next_rw (seg); heap_segment_next (seg) = new_seg; } heap_segment* gc_heap::get_uoh_segment (int gen_number, size_t size, BOOL* did_full_compact_gc) { *did_full_compact_gc = FALSE; size_t last_full_compact_gc_count = get_full_compact_gc_count(); //access to get_segment needs to be serialized add_saved_spinlock_info (true, me_release, mt_get_large_seg); leave_spin_lock (&more_space_lock_uoh); enter_spin_lock (&gc_heap::gc_lock); dprintf (SPINLOCK_LOG, ("[%d]Seg: Egc", heap_number)); // if a GC happened between here and before we ask for a segment in // get_uoh_segment, we need to count that GC. 
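// The count sampled below is compared against last_full_compact_gc_count, which was taken before the
// locks were exchanged; if it grew, *did_full_compact_gc tells the caller that a full compacting GC
// already ran, so it can presumably re-check for free space before committing to a new UOH segment.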
size_t current_full_compact_gc_count = get_full_compact_gc_count(); if (current_full_compact_gc_count > last_full_compact_gc_count) { *did_full_compact_gc = TRUE; } heap_segment* res = get_segment_for_uoh (gen_number, size #ifdef MULTIPLE_HEAPS , this #endif //MULTIPLE_HEAPS ); dprintf (SPINLOCK_LOG, ("[%d]Seg: A Lgc", heap_number)); leave_spin_lock (&gc_heap::gc_lock); enter_spin_lock (&more_space_lock_uoh); add_saved_spinlock_info (true, me_acquire, mt_get_large_seg); return res; } #ifdef MULTIPLE_HEAPS #ifdef HOST_X86 #ifdef _MSC_VER #pragma warning(disable:4035) static ptrdiff_t get_cycle_count() { __asm rdtsc } #pragma warning(default:4035) #elif defined(__GNUC__) static ptrdiff_t get_cycle_count() { ptrdiff_t cycles; ptrdiff_t cyclesHi; __asm__ __volatile__ ("rdtsc":"=a" (cycles), "=d" (cyclesHi)); return cycles; } #else //_MSC_VER #error Unknown compiler #endif //_MSC_VER #elif defined(TARGET_AMD64) #ifdef _MSC_VER extern "C" uint64_t __rdtsc(); #pragma intrinsic(__rdtsc) static ptrdiff_t get_cycle_count() { return (ptrdiff_t)__rdtsc(); } #elif defined(__GNUC__) static ptrdiff_t get_cycle_count() { ptrdiff_t cycles; ptrdiff_t cyclesHi; __asm__ __volatile__ ("rdtsc":"=a" (cycles), "=d" (cyclesHi)); return (cyclesHi << 32) | cycles; } #else // _MSC_VER extern "C" ptrdiff_t get_cycle_count(void); #endif // _MSC_VER #elif defined(TARGET_LOONGARCH64) static ptrdiff_t get_cycle_count() { ////FIXME: TODO for LOONGARCH64: //ptrdiff_t cycle; __asm__ volatile ("break \n"); return 0; } #else static ptrdiff_t get_cycle_count() { // @ARMTODO, @ARM64TODO, @WASMTODO: cycle counter is not exposed to user mode. For now (until we can show this // makes a difference on the configurations on which we'll run) just return 0. This will result in // all buffer access times being reported as equal in access_time(). return 0; } #endif //TARGET_X86 // We may not be on contiguous numa nodes so need to store // the node index as well. struct node_heap_count { int node_no; int heap_count; }; class heap_select { heap_select() {} public: static uint8_t* sniff_buffer; static unsigned n_sniff_buffers; static unsigned cur_sniff_index; static uint16_t proc_no_to_heap_no[MAX_SUPPORTED_CPUS]; static uint16_t heap_no_to_proc_no[MAX_SUPPORTED_CPUS]; static uint16_t heap_no_to_numa_node[MAX_SUPPORTED_CPUS]; static uint16_t proc_no_to_numa_node[MAX_SUPPORTED_CPUS]; static uint16_t numa_node_to_heap_map[MAX_SUPPORTED_CPUS+4]; // Note this is the total numa nodes GC heaps are on. There might be // more on the machine if GC threads aren't using all of them. 
static uint16_t total_numa_nodes; static node_heap_count heaps_on_node[MAX_SUPPORTED_NODES]; static int access_time(uint8_t *sniff_buffer, int heap_number, unsigned sniff_index, unsigned n_sniff_buffers) { ptrdiff_t start_cycles = get_cycle_count(); uint8_t sniff = sniff_buffer[(1 + heap_number*n_sniff_buffers + sniff_index)*HS_CACHE_LINE_SIZE]; assert (sniff == 0); ptrdiff_t elapsed_cycles = get_cycle_count() - start_cycles; // add sniff here just to defeat the optimizer elapsed_cycles += sniff; return (int) elapsed_cycles; } public: static BOOL init(int n_heaps) { assert (sniff_buffer == NULL && n_sniff_buffers == 0); if (!GCToOSInterface::CanGetCurrentProcessorNumber()) { n_sniff_buffers = n_heaps*2+1; size_t n_cache_lines = 1 + n_heaps * n_sniff_buffers + 1; size_t sniff_buf_size = n_cache_lines * HS_CACHE_LINE_SIZE; if (sniff_buf_size / HS_CACHE_LINE_SIZE != n_cache_lines) // check for overlow { return FALSE; } sniff_buffer = new (nothrow) uint8_t[sniff_buf_size]; if (sniff_buffer == 0) return FALSE; memset(sniff_buffer, 0, sniff_buf_size*sizeof(uint8_t)); } bool do_numa = GCToOSInterface::CanEnableGCNumaAware(); // we want to assign heap indices such that there is a contiguous // range of heap numbers for each numa node // we do this in two passes: // 1. gather processor numbers and numa node numbers for all heaps // 2. assign heap numbers for each numa node // Pass 1: gather processor numbers and numa node numbers uint16_t proc_no[MAX_SUPPORTED_CPUS]; uint16_t node_no[MAX_SUPPORTED_CPUS]; uint16_t max_node_no = 0; for (int i = 0; i < n_heaps; i++) { if (!GCToOSInterface::GetProcessorForHeap (i, &proc_no[i], &node_no[i])) break; if (!do_numa || node_no[i] == NUMA_NODE_UNDEFINED) node_no[i] = 0; max_node_no = max(max_node_no, node_no[i]); } // Pass 2: assign heap numbers by numa node int cur_heap_no = 0; for (uint16_t cur_node_no = 0; cur_node_no <= max_node_no; cur_node_no++) { for (int i = 0; i < n_heaps; i++) { if (node_no[i] != cur_node_no) continue; // we found a heap on cur_node_no heap_no_to_proc_no[cur_heap_no] = proc_no[i]; heap_no_to_numa_node[cur_heap_no] = cur_node_no; proc_no_to_numa_node[proc_no[i]] = cur_node_no; cur_heap_no++; } } return TRUE; } static void init_cpu_mapping(int heap_number) { if (GCToOSInterface::CanGetCurrentProcessorNumber()) { uint32_t proc_no = GCToOSInterface::GetCurrentProcessorNumber(); proc_no_to_heap_no[proc_no] = (uint16_t)heap_number; } } static void mark_heap(int heap_number) { if (GCToOSInterface::CanGetCurrentProcessorNumber()) return; for (unsigned sniff_index = 0; sniff_index < n_sniff_buffers; sniff_index++) sniff_buffer[(1 + heap_number*n_sniff_buffers + sniff_index)*HS_CACHE_LINE_SIZE] &= 1; } static int select_heap(alloc_context* acontext) { #ifndef TRACE_GC UNREFERENCED_PARAMETER(acontext); // only referenced by dprintf #endif //TRACE_GC if (GCToOSInterface::CanGetCurrentProcessorNumber()) { uint32_t proc_no = GCToOSInterface::GetCurrentProcessorNumber(); return proc_no_to_heap_no[proc_no]; } unsigned sniff_index = Interlocked::Increment(&cur_sniff_index); sniff_index %= n_sniff_buffers; int best_heap = 0; int best_access_time = 1000*1000*1000; int second_best_access_time = best_access_time; uint8_t *l_sniff_buffer = sniff_buffer; unsigned l_n_sniff_buffers = n_sniff_buffers; for (int heap_number = 0; heap_number < gc_heap::n_heaps; heap_number++) { int this_access_time = access_time(l_sniff_buffer, heap_number, sniff_index, l_n_sniff_buffers); if (this_access_time < best_access_time) { second_best_access_time = best_access_time; 
best_access_time = this_access_time; best_heap = heap_number; } else if (this_access_time < second_best_access_time) { second_best_access_time = this_access_time; } } if (best_access_time*2 < second_best_access_time) { sniff_buffer[(1 + best_heap*n_sniff_buffers + sniff_index)*HS_CACHE_LINE_SIZE] &= 1; dprintf (3, ("select_heap yields crisp %d for context %p\n", best_heap, (void *)acontext)); } else { dprintf (3, ("select_heap yields vague %d for context %p\n", best_heap, (void *)acontext )); } return best_heap; } static bool can_find_heap_fast() { return GCToOSInterface::CanGetCurrentProcessorNumber(); } static uint16_t find_heap_no_from_proc_no(uint16_t proc_no) { return proc_no_to_heap_no[proc_no]; } static uint16_t find_proc_no_from_heap_no(int heap_number) { return heap_no_to_proc_no[heap_number]; } static void set_proc_no_for_heap(int heap_number, uint16_t proc_no) { heap_no_to_proc_no[heap_number] = proc_no; } static uint16_t find_numa_node_from_heap_no(int heap_number) { return heap_no_to_numa_node[heap_number]; } static uint16_t find_numa_node_from_proc_no (uint16_t proc_no) { return proc_no_to_numa_node[proc_no]; } static void set_numa_node_for_heap_and_proc(int heap_number, uint16_t proc_no, uint16_t numa_node) { heap_no_to_numa_node[heap_number] = numa_node; proc_no_to_numa_node[proc_no] = numa_node; } static void init_numa_node_to_heap_map(int nheaps) { // Called right after GCHeap::Init() for each heap // For each NUMA node used by the heaps, the // numa_node_to_heap_map[numa_node] is set to the first heap number on that node and // numa_node_to_heap_map[numa_node + 1] is set to the first heap number not on that node // Set the start of the heap number range for the first NUMA node numa_node_to_heap_map[heap_no_to_numa_node[0]] = 0; total_numa_nodes = 0; memset (heaps_on_node, 0, sizeof (heaps_on_node)); heaps_on_node[0].node_no = heap_no_to_numa_node[0]; heaps_on_node[0].heap_count = 1; for (int i=1; i < nheaps; i++) { if (heap_no_to_numa_node[i] != heap_no_to_numa_node[i-1]) { total_numa_nodes++; heaps_on_node[total_numa_nodes].node_no = heap_no_to_numa_node[i]; // Set the end of the heap number range for the previous NUMA node numa_node_to_heap_map[heap_no_to_numa_node[i-1] + 1] = // Set the start of the heap number range for the current NUMA node numa_node_to_heap_map[heap_no_to_numa_node[i]] = (uint16_t)i; } (heaps_on_node[total_numa_nodes].heap_count)++; } // Set the end of the heap range for the last NUMA node numa_node_to_heap_map[heap_no_to_numa_node[nheaps-1] + 1] = (uint16_t)nheaps; //mark the end with nheaps total_numa_nodes++; } // TODO: curently this doesn't work with GCHeapAffinitizeMask/GCHeapAffinitizeRanges // because the heaps may not be on contiguous active procs. // // This is for scenarios where GCHeapCount is specified as something like // (g_num_active_processors - 2) to allow less randomization to the Server GC threads. // In this case we want to assign the right heaps to those procs, ie if they share // the same numa node we want to assign local heaps to those procs. Otherwise we // let the heap balancing mechanism take over for now. 
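// Concretely, the function below walks the procs that did not get a dedicated heap (indices
// gc_heap::n_heaps .. g_num_active_processors-1), looks up each proc's NUMA node, and maps it to the
// next unused heap on that node via proc_no_to_heap_no / proc_no_to_numa_node; once every heap on a
// node has received one such proc, the remaining procs on that node are skipped. Nothing is done at
// all when affinity was explicitly configured (affinity_config_specified_p).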
static void distribute_other_procs() { if (affinity_config_specified_p) return; uint16_t proc_no = 0; uint16_t node_no = 0; bool res = false; int start_heap = -1; int end_heap = -1; int current_node_no = -1; int current_heap_on_node = -1; for (int i = gc_heap::n_heaps; i < (int)g_num_active_processors; i++) { if (!GCToOSInterface::GetProcessorForHeap (i, &proc_no, &node_no)) break; int start_heap = (int)numa_node_to_heap_map[node_no]; int end_heap = (int)(numa_node_to_heap_map[node_no + 1]); if ((end_heap - start_heap) > 0) { if (node_no == current_node_no) { // We already iterated through all heaps on this node, don't add more procs to these // heaps. if (current_heap_on_node >= end_heap) { continue; } } else { current_node_no = node_no; current_heap_on_node = start_heap; } proc_no_to_heap_no[proc_no] = current_heap_on_node; proc_no_to_numa_node[proc_no] = node_no; current_heap_on_node++; } } } static void get_heap_range_for_heap(int hn, int* start, int* end) { uint16_t numa_node = heap_no_to_numa_node[hn]; *start = (int)numa_node_to_heap_map[numa_node]; *end = (int)(numa_node_to_heap_map[numa_node+1]); #ifdef HEAP_BALANCE_INSTRUMENTATION dprintf(HEAP_BALANCE_TEMP_LOG, ("TEMPget_heap_range: %d is in numa node %d, start = %d, end = %d", hn, numa_node, *start, *end)); #endif //HEAP_BALANCE_INSTRUMENTATION } // This gets the next valid numa node index starting at current_index+1. // It assumes that current_index is a valid node index. // If current_index+1 is at the end this will start at the beginning. So this will // always return a valid node index, along with that node's start/end heaps. static uint16_t get_next_numa_node (uint16_t current_index, int* start, int* end) { int start_index = current_index + 1; int nheaps = gc_heap::n_heaps; bool found_node_with_heaps_p = false; do { int start_heap = (int)numa_node_to_heap_map[start_index]; int end_heap = (int)numa_node_to_heap_map[start_index + 1]; if (start_heap == nheaps) { // This is the last node. start_index = 0; continue; } if ((end_heap - start_heap) == 0) { // This node has no heaps. start_index++; } else { found_node_with_heaps_p = true; *start = start_heap; *end = end_heap; } } while (!found_node_with_heaps_p); return start_index; } }; uint8_t* heap_select::sniff_buffer; unsigned heap_select::n_sniff_buffers; unsigned heap_select::cur_sniff_index; uint16_t heap_select::proc_no_to_heap_no[MAX_SUPPORTED_CPUS]; uint16_t heap_select::heap_no_to_proc_no[MAX_SUPPORTED_CPUS]; uint16_t heap_select::heap_no_to_numa_node[MAX_SUPPORTED_CPUS]; uint16_t heap_select::proc_no_to_numa_node[MAX_SUPPORTED_CPUS]; uint16_t heap_select::numa_node_to_heap_map[MAX_SUPPORTED_CPUS+4]; uint16_t heap_select::total_numa_nodes; node_heap_count heap_select::heaps_on_node[MAX_SUPPORTED_NODES]; #ifdef HEAP_BALANCE_INSTRUMENTATION // This records info we use to look at effect of different strategies // for heap balancing. struct heap_balance_info { uint64_t timestamp; // This also encodes when we detect the thread runs on // different proc during a balance attempt. Sometimes // I observe this happens multiple times during one attempt! // If this happens, I just record the last proc we observe // and set MSB. int tid; // This records the final alloc_heap for the thread. // // This also encodes the reason why we needed to set_home_heap // in balance_heaps. // If we set it because the home heap is not the same as the proc, // we set MSB. // // If we set ideal proc, we set the 2nd MSB. 
int alloc_heap; int ideal_proc_no; }; // This means that in between each GC we can log at most this many entries per proc. // This is usually enough. Most of the time we only need to log something every 128k // of allocations in balance_heaps and gen0 budget is <= 200mb. #define default_max_hb_heap_balance_info 4096 struct heap_balance_info_proc { int count; int index; heap_balance_info hb_info[default_max_hb_heap_balance_info]; }; struct heap_balance_info_numa { heap_balance_info_proc* hb_info_procs; }; uint64_t start_raw_ts = 0; bool cpu_group_enabled_p = false; uint32_t procs_per_numa_node = 0; uint16_t total_numa_nodes_on_machine = 0; uint32_t procs_per_cpu_group = 0; uint16_t total_cpu_groups_on_machine = 0; // Note this is still on one of the numa nodes, so we'll incur a remote access // no matter what. heap_balance_info_numa* hb_info_numa_nodes = NULL; // TODO: This doesn't work for multiple nodes per CPU group yet. int get_proc_index_numa (int proc_no, int* numa_no) { if (total_numa_nodes_on_machine == 1) { *numa_no = 0; return proc_no; } else { if (cpu_group_enabled_p) { // see vm\gcenv.os.cpp GroupProcNo implementation. *numa_no = proc_no >> 6; return (proc_no % 64); } else { *numa_no = proc_no / procs_per_numa_node; return (proc_no % procs_per_numa_node); } } } // We could consider optimizing it so we don't need to get the tid // every time but it's not very expensive to get. void add_to_hb_numa ( int proc_no, int ideal_proc_no, int alloc_heap, bool multiple_procs_p, bool alloc_count_p, bool set_ideal_p) { int tid = (int)GCToOSInterface::GetCurrentThreadIdForLogging (); uint64_t timestamp = RawGetHighPrecisionTimeStamp (); int saved_proc_no = proc_no; int numa_no = -1; proc_no = get_proc_index_numa (proc_no, &numa_no); heap_balance_info_numa* hb_info_numa_node = &hb_info_numa_nodes[numa_no]; heap_balance_info_proc* hb_info_proc = &(hb_info_numa_node->hb_info_procs[proc_no]); int index = hb_info_proc->index; int count = hb_info_proc->count; if (index == count) { // Too much info in between GCs. This can happen if the thread is scheduled on a different // processor very often, causing us to log many entries. You could // increase default_max_hb_heap_balance_info but this usually indicates a problem that // should be investigated.
dprintf (HEAP_BALANCE_LOG, ("too much info between GCs, already logged %d entries", index)); GCToOSInterface::DebugBreak (); } heap_balance_info* hb_info = &(hb_info_proc->hb_info[index]); dprintf (HEAP_BALANCE_TEMP_LOG, ("TEMP[p%3d->%3d(i:%3d), N%d] #%4d: %I64d, tid %d, ah: %d, m: %d, p: %d, i: %d", saved_proc_no, proc_no, ideal_proc_no, numa_no, index, (timestamp - start_raw_ts) / 1000, tid, alloc_heap, (int)multiple_procs_p, (int)(!alloc_count_p), (int)set_ideal_p)); if (multiple_procs_p) { tid |= (1 << (sizeof (tid) * 8 - 1)); } if (!alloc_count_p) { alloc_heap |= (1 << (sizeof (alloc_heap) * 8 - 1)); } if (set_ideal_p) { alloc_heap |= (1 << (sizeof (alloc_heap) * 8 - 2)); } hb_info->timestamp = timestamp; hb_info->tid = tid; hb_info->alloc_heap = alloc_heap; hb_info->ideal_proc_no = ideal_proc_no; (hb_info_proc->index)++; } const int hb_log_buffer_size = 4096; static char hb_log_buffer[hb_log_buffer_size]; int last_hb_recorded_gc_index = -1; #endif //HEAP_BALANCE_INSTRUMENTATION // This logs what we recorded in balance_heaps // The format for this is // // [ms since last GC end] // [cpu index] // all elements we stored before this GC for this CPU in the format // timestamp,tid, alloc_heap_no // repeat this for each CPU // // the timestamp here is just the result of calling QPC, // it's not converted to ms. The conversion will be done when we process // the log. void gc_heap::hb_log_balance_activities() { #ifdef HEAP_BALANCE_INSTRUMENTATION char* log_buffer = hb_log_buffer; uint64_t now = GetHighPrecisionTimeStamp(); size_t time_since_last_gc_ms = (size_t)((now - last_gc_end_time_us) / 1000); dprintf (HEAP_BALANCE_TEMP_LOG, ("TEMP%Id - %Id = %Id", now, last_gc_end_time_ms, time_since_last_gc_ms)); // We want to get the min and the max timestamp for all procs because it helps with our post processing // to know how big an array to allocate to display the history inbetween the GCs. uint64_t min_timestamp = 0xffffffffffffffff; uint64_t max_timestamp = 0; for (int numa_node_index = 0; numa_node_index < total_numa_nodes_on_machine; numa_node_index++) { heap_balance_info_proc* hb_info_procs = hb_info_numa_nodes[numa_node_index].hb_info_procs; for (int proc_index = 0; proc_index < (int)procs_per_numa_node; proc_index++) { heap_balance_info_proc* hb_info_proc = &hb_info_procs[proc_index]; int total_entries_on_proc = hb_info_proc->index; if (total_entries_on_proc > 0) { min_timestamp = min (min_timestamp, hb_info_proc->hb_info[0].timestamp); max_timestamp = max (max_timestamp, hb_info_proc->hb_info[total_entries_on_proc - 1].timestamp); } } } dprintf (HEAP_BALANCE_LOG, ("[GCA#%Id %Id-%I64d-%I64d]", settings.gc_index, time_since_last_gc_ms, (min_timestamp - start_raw_ts), (max_timestamp - start_raw_ts))); if (last_hb_recorded_gc_index == (int)settings.gc_index) { GCToOSInterface::DebugBreak (); } last_hb_recorded_gc_index = (int)settings.gc_index; // When we print out the proc index we need to convert it to the actual proc index (this is contiguous). // It helps with post processing. 
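// Each record below is emitted as 'timestamp,tid,ideal_proc,alloc_heap' plus optional suffixes:
// '|m' when the thread was seen on multiple procs during the balance attempt (MSB of tid), '|p' when
// the entry was not triggered by the alloc count (MSB of alloc_heap), and '|i' when an ideal
// processor was set (second MSB of alloc_heap), mirroring how add_to_hb_numa packed those bits.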
for (int numa_node_index = 0; numa_node_index < total_numa_nodes_on_machine; numa_node_index++) { heap_balance_info_proc* hb_info_procs = hb_info_numa_nodes[numa_node_index].hb_info_procs; for (int proc_index = 0; proc_index < (int)procs_per_numa_node; proc_index++) { heap_balance_info_proc* hb_info_proc = &hb_info_procs[proc_index]; int total_entries_on_proc = hb_info_proc->index; if (total_entries_on_proc > 0) { int total_exec_time_ms = (int)((double)(hb_info_proc->hb_info[total_entries_on_proc - 1].timestamp - hb_info_proc->hb_info[0].timestamp) * qpf_ms); dprintf (HEAP_BALANCE_LOG, ("[p%d]-%d-%dms", (proc_index + numa_node_index * procs_per_numa_node), total_entries_on_proc, total_exec_time_ms)); } for (int i = 0; i < hb_info_proc->index; i++) { heap_balance_info* hb_info = &hb_info_proc->hb_info[i]; bool multiple_procs_p = false; bool alloc_count_p = true; bool set_ideal_p = false; int tid = hb_info->tid; int alloc_heap = hb_info->alloc_heap; if (tid & (1 << (sizeof (tid) * 8 - 1))) { multiple_procs_p = true; tid &= ~(1 << (sizeof (tid) * 8 - 1)); } if (alloc_heap & (1 << (sizeof (alloc_heap) * 8 - 1))) { alloc_count_p = false; alloc_heap &= ~(1 << (sizeof (alloc_heap) * 8 - 1)); } if (alloc_heap & (1 << (sizeof (alloc_heap) * 8 - 2))) { set_ideal_p = true; alloc_heap &= ~(1 << (sizeof (alloc_heap) * 8 - 2)); } // TODO - This assumes ideal proc is in the same cpu group which is not true // when we don't have CPU groups. int ideal_proc_no = hb_info->ideal_proc_no; int ideal_node_no = -1; ideal_proc_no = get_proc_index_numa (ideal_proc_no, &ideal_node_no); ideal_proc_no = ideal_proc_no + ideal_node_no * procs_per_numa_node; dprintf (HEAP_BALANCE_LOG, ("%I64d,%d,%d,%d%s%s%s", (hb_info->timestamp - start_raw_ts), tid, ideal_proc_no, (int)alloc_heap, (multiple_procs_p ? "|m" : ""), (!alloc_count_p ? "|p" : ""), (set_ideal_p ? "|i" : ""))); } } } for (int numa_node_index = 0; numa_node_index < total_numa_nodes_on_machine; numa_node_index++) { heap_balance_info_proc* hb_info_procs = hb_info_numa_nodes[numa_node_index].hb_info_procs; for (int proc_index = 0; proc_index < (int)procs_per_numa_node; proc_index++) { heap_balance_info_proc* hb_info_proc = &hb_info_procs[proc_index]; hb_info_proc->index = 0; } } #endif //HEAP_BALANCE_INSTRUMENTATION } // The format for this is // // [GC_alloc_mb] // h0_new_alloc, h1_new_alloc, ... // void gc_heap::hb_log_new_allocation() { #ifdef HEAP_BALANCE_INSTRUMENTATION char* log_buffer = hb_log_buffer; int desired_alloc_mb = (int)(dd_desired_allocation (g_heaps[0]->dynamic_data_of (0)) / 1024 / 1024); int buffer_pos = sprintf_s (hb_log_buffer, hb_log_buffer_size, "[GC_alloc_mb]\n"); for (int numa_node_index = 0; numa_node_index < heap_select::total_numa_nodes; numa_node_index++) { int node_allocated_mb = 0; // I'm printing out the budget here instead of the numa node index so we know how much // of the budget we consumed. 
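// Purely illustrative (the numbers are made up): with two NUMA nodes, four heaps per node and a 96mb
// gen0 budget, the block this function emits could look like:
// [GC_alloc_mb]
// [N# 96]23,25,22,24,
// [N# 96]21,26,23,22,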
buffer_pos += sprintf_s (hb_log_buffer + buffer_pos, hb_log_buffer_size - buffer_pos, "[N#%3d]", //numa_node_index); desired_alloc_mb); int heaps_on_node = heap_select::heaps_on_node[numa_node_index].heap_count; for (int heap_index = 0; heap_index < heaps_on_node; heap_index++) { int actual_heap_index = heap_index + numa_node_index * heaps_on_node; gc_heap* hp = g_heaps[actual_heap_index]; dynamic_data* dd0 = hp->dynamic_data_of (0); int allocated_mb = (int)((dd_desired_allocation (dd0) - dd_new_allocation (dd0)) / 1024 / 1024); node_allocated_mb += allocated_mb; buffer_pos += sprintf_s (hb_log_buffer + buffer_pos, hb_log_buffer_size - buffer_pos, "%d,", allocated_mb); } dprintf (HEAP_BALANCE_TEMP_LOG, ("TEMPN#%d a %dmb(%dmb)", numa_node_index, node_allocated_mb, desired_alloc_mb)); buffer_pos += sprintf_s (hb_log_buffer + buffer_pos, hb_log_buffer_size - buffer_pos, "\n"); } dprintf (HEAP_BALANCE_LOG, ("%s", hb_log_buffer)); #endif //HEAP_BALANCE_INSTRUMENTATION } BOOL gc_heap::create_thread_support (int number_of_heaps) { BOOL ret = FALSE; if (!gc_start_event.CreateOSManualEventNoThrow (FALSE)) { goto cleanup; } if (!ee_suspend_event.CreateOSAutoEventNoThrow (FALSE)) { goto cleanup; } if (!gc_t_join.init (number_of_heaps, join_flavor_server_gc)) { goto cleanup; } ret = TRUE; cleanup: if (!ret) { destroy_thread_support(); } return ret; } void gc_heap::destroy_thread_support () { if (ee_suspend_event.IsValid()) { ee_suspend_event.CloseEvent(); } if (gc_start_event.IsValid()) { gc_start_event.CloseEvent(); } } void set_thread_affinity_for_heap (int heap_number, uint16_t proc_no) { if (!GCToOSInterface::SetThreadAffinity (proc_no)) { dprintf (1, ("Failed to set thread affinity for GC thread %d on proc #%d", heap_number, proc_no)); } } bool gc_heap::create_gc_thread () { dprintf (3, ("Creating gc thread\n")); return GCToEEInterface::CreateThread(gc_thread_stub, this, false, ".NET Server GC"); } #ifdef _MSC_VER #pragma warning(disable:4715) //IA64 xcompiler recognizes that without the 'break;' the while(1) will never end and therefore not return a value for that code path #endif //_MSC_VER void gc_heap::gc_thread_function () { assert (gc_done_event.IsValid()); assert (gc_start_event.IsValid()); dprintf (3, ("gc thread started")); heap_select::init_cpu_mapping(heap_number); while (1) { assert (!gc_t_join.joined()); if (heap_number == 0) { uint32_t wait_result = gc_heap::ee_suspend_event.Wait(gradual_decommit_in_progress_p ? 
DECOMMIT_TIME_STEP_MILLISECONDS : INFINITE, FALSE); if (wait_result == WAIT_TIMEOUT) { gradual_decommit_in_progress_p = decommit_step (); continue; } suspended_start_time = GetHighPrecisionTimeStamp(); BEGIN_TIMING(suspend_ee_during_log); GCToEEInterface::SuspendEE(SUSPEND_FOR_GC); END_TIMING(suspend_ee_during_log); proceed_with_gc_p = TRUE; gradual_decommit_in_progress_p = FALSE; if (!should_proceed_with_gc()) { update_collection_counts_for_no_gc(); proceed_with_gc_p = FALSE; } else { settings.init_mechanisms(); gc_start_event.Set(); } dprintf (3, (ThreadStressLog::gcServerThread0StartMsg(), heap_number)); } else { gc_start_event.Wait(INFINITE, FALSE); dprintf (3, (ThreadStressLog::gcServerThreadNStartMsg(), heap_number)); } assert ((heap_number == 0) || proceed_with_gc_p); if (proceed_with_gc_p) { garbage_collect (GCHeap::GcCondemnedGeneration); if (pm_trigger_full_gc) { garbage_collect_pm_full_gc(); } } if (heap_number == 0) { if (proceed_with_gc_p && (!settings.concurrent)) { do_post_gc(); } #ifdef BACKGROUND_GC recover_bgc_settings(); #endif //BACKGROUND_GC #ifdef MULTIPLE_HEAPS for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; hp->add_saved_spinlock_info (false, me_release, mt_block_gc); leave_spin_lock(&hp->more_space_lock_soh); } #endif //MULTIPLE_HEAPS gc_heap::gc_started = FALSE; #ifdef BACKGROUND_GC gc_heap::add_bgc_pause_duration_0(); #endif //BACKGROUND_GC BEGIN_TIMING(restart_ee_during_log); GCToEEInterface::RestartEE(TRUE); END_TIMING(restart_ee_during_log); process_sync_log_stats(); dprintf (SPINLOCK_LOG, ("GC Lgc")); leave_spin_lock (&gc_heap::gc_lock); gc_heap::internal_gc_done = true; if (proceed_with_gc_p) set_gc_done(); else { // If we didn't actually do a GC, it means we didn't wait up the other threads, // we still need to set the gc_done_event for those threads. for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; hp->set_gc_done(); } } // check if we should do some decommitting if (gradual_decommit_in_progress_p) { gradual_decommit_in_progress_p = decommit_step (); } } else { int spin_count = 32 * (gc_heap::n_heaps - 1); // wait until RestartEE has progressed to a stage where we can restart user threads while (!gc_heap::internal_gc_done && !GCHeap::SafeToRestartManagedThreads()) { spin_and_switch (spin_count, (gc_heap::internal_gc_done || GCHeap::SafeToRestartManagedThreads())); } set_gc_done(); } } } #ifdef _MSC_VER #pragma warning(default:4715) //IA64 xcompiler recognizes that without the 'break;' the while(1) will never end and therefore not return a value for that code path #endif //_MSC_VER #endif //MULTIPLE_HEAPS bool gc_heap::virtual_alloc_commit_for_heap (void* addr, size_t size, int h_number) { #if defined(MULTIPLE_HEAPS) && !defined(FEATURE_REDHAWK) // Currently there is no way for us to specific the numa node to allocate on via hosting interfaces to // a host. This will need to be added later. 
#if !defined(FEATURE_CORECLR) && !defined(BUILD_AS_STANDALONE) if (!CLRMemoryHosted()) #endif { if (GCToOSInterface::CanEnableGCNumaAware()) { uint16_t numa_node = heap_select::find_numa_node_from_heap_no(h_number); if (GCToOSInterface::VirtualCommit (addr, size, numa_node)) return true; } } #else //MULTIPLE_HEAPS && !FEATURE_REDHAWK UNREFERENCED_PARAMETER(h_number); #endif //MULTIPLE_HEAPS && !FEATURE_REDHAWK //numa aware not enabled, or call failed --> fallback to VirtualCommit() return GCToOSInterface::VirtualCommit(addr, size); } bool gc_heap::virtual_commit (void* address, size_t size, gc_oh_num oh, int h_number, bool* hard_limit_exceeded_p) { #ifndef HOST_64BIT assert (heap_hard_limit == 0); #endif //!HOST_64BIT if (heap_hard_limit) { check_commit_cs.Enter(); bool exceeded_p = false; if (heap_hard_limit_oh[soh] != 0) { if ((oh != gc_oh_num::none) && (committed_by_oh[oh] + size) > heap_hard_limit_oh[oh]) { exceeded_p = true; } } else if ((current_total_committed + size) > heap_hard_limit) { dprintf (1, ("%Id + %Id = %Id > limit %Id ", current_total_committed, size, (current_total_committed + size), heap_hard_limit)); exceeded_p = true; } if (!exceeded_p) { committed_by_oh[oh] += size; current_total_committed += size; if (h_number < 0) current_total_committed_bookkeeping += size; } check_commit_cs.Leave(); if (hard_limit_exceeded_p) *hard_limit_exceeded_p = exceeded_p; if (exceeded_p) { dprintf (1, ("can't commit %Ix for %Id bytes > HARD LIMIT %Id", (size_t)address, size, heap_hard_limit)); return false; } } // If it's a valid heap number it means it's commiting for memory on the GC heap. // In addition if large pages is enabled, we set commit_succeeded_p to true because memory is already committed. bool commit_succeeded_p = ((h_number >= 0) ? (use_large_pages_p ? true : virtual_alloc_commit_for_heap (address, size, h_number)) : GCToOSInterface::VirtualCommit(address, size)); if (!commit_succeeded_p && heap_hard_limit) { check_commit_cs.Enter(); committed_by_oh[oh] -= size; dprintf (1, ("commit failed, updating %Id to %Id", current_total_committed, (current_total_committed - size))); current_total_committed -= size; if (h_number < 0) current_total_committed_bookkeeping -= size; check_commit_cs.Leave(); } return commit_succeeded_p; } bool gc_heap::virtual_decommit (void* address, size_t size, gc_oh_num oh, int h_number) { #ifndef HOST_64BIT assert (heap_hard_limit == 0); #endif //!HOST_64BIT bool decommit_succeeded_p = GCToOSInterface::VirtualDecommit (address, size); if (decommit_succeeded_p && heap_hard_limit) { check_commit_cs.Enter(); committed_by_oh[oh] -= size; current_total_committed -= size; if (h_number < 0) current_total_committed_bookkeeping -= size; check_commit_cs.Leave(); } return decommit_succeeded_p; } void gc_heap::virtual_free (void* add, size_t allocated_size, heap_segment* sg) { bool release_succeeded_p = GCToOSInterface::VirtualRelease (add, allocated_size); if (release_succeeded_p) { reserved_memory -= allocated_size; dprintf (2, ("Virtual Free size %Id: [%Ix, %Ix[", allocated_size, (size_t)add, (size_t)((uint8_t*)add + allocated_size))); } } class mark { public: uint8_t* first; size_t len; // If we want to save space we can have a pool of plug_and_gap's instead of // always having 2 allocated for each pinned plug. gap_reloc_pair saved_pre_plug; // If we decide to not compact, we need to restore the original values. 
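    // --- Illustrative note (added commentary, not part of the original source) ---
    // The four gap_reloc_pair members form two pairs: saved_pre_plug/saved_post_plug keep
    // the original bytes that the sweeping and profiler paths use, while
    // saved_pre_plug_reloc/saved_post_plug_reloc keep the copies the compacting path works
    // with (see swap_pre_plug_and_saved and recover_plug_info below, which pick one pair or
    // the other based on gc_heap::settings.compaction).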
gap_reloc_pair saved_pre_plug_reloc; gap_reloc_pair saved_post_plug; // Supposedly Pinned objects cannot have references but we are seeing some from pinvoke // frames. Also if it's an artificially pinned plug created by us, it can certainly // have references. // We know these cases will be rare so we can optimize this to be only allocated on demand. gap_reloc_pair saved_post_plug_reloc; // We need to calculate this after we are done with plan phase and before compact // phase because compact phase will change the bricks so relocate_address will no // longer work. uint8_t* saved_pre_plug_info_reloc_start; // We need to save this because we will have no way to calculate it, unlike the // pre plug info start which is right before this plug. uint8_t* saved_post_plug_info_start; #ifdef SHORT_PLUGS uint8_t* allocation_context_start_region; #endif //SHORT_PLUGS // How the bits in these bytes are organized: // MSB --> LSB // bit to indicate whether it's a short obj | 3 bits for refs in this short obj | 2 unused bits | bit to indicate if it's collectible | last bit // last bit indicates if there's pre or post info associated with this plug. If it's not set all other bits will be 0. BOOL saved_pre_p; BOOL saved_post_p; #ifdef _DEBUG // We are seeing this is getting corrupted for a PP with a NP after. // Save it when we first set it and make sure it doesn't change. gap_reloc_pair saved_post_plug_debug; #endif //_DEBUG size_t get_max_short_bits() { return (sizeof (gap_reloc_pair) / sizeof (uint8_t*)); } // pre bits size_t get_pre_short_start_bit () { return (sizeof (saved_pre_p) * 8 - 1 - (sizeof (gap_reloc_pair) / sizeof (uint8_t*))); } BOOL pre_short_p() { return (saved_pre_p & (1 << (sizeof (saved_pre_p) * 8 - 1))); } void set_pre_short() { saved_pre_p |= (1 << (sizeof (saved_pre_p) * 8 - 1)); } void set_pre_short_bit (size_t bit) { saved_pre_p |= 1 << (get_pre_short_start_bit() + bit); } BOOL pre_short_bit_p (size_t bit) { return (saved_pre_p & (1 << (get_pre_short_start_bit() + bit))); } #ifdef COLLECTIBLE_CLASS void set_pre_short_collectible() { saved_pre_p |= 2; } BOOL pre_short_collectible_p() { return (saved_pre_p & 2); } #endif //COLLECTIBLE_CLASS // post bits size_t get_post_short_start_bit () { return (sizeof (saved_post_p) * 8 - 1 - (sizeof (gap_reloc_pair) / sizeof (uint8_t*))); } BOOL post_short_p() { return (saved_post_p & (1 << (sizeof (saved_post_p) * 8 - 1))); } void set_post_short() { saved_post_p |= (1 << (sizeof (saved_post_p) * 8 - 1)); } void set_post_short_bit (size_t bit) { saved_post_p |= 1 << (get_post_short_start_bit() + bit); } BOOL post_short_bit_p (size_t bit) { return (saved_post_p & (1 << (get_post_short_start_bit() + bit))); } #ifdef COLLECTIBLE_CLASS void set_post_short_collectible() { saved_post_p |= 2; } BOOL post_short_collectible_p() { return (saved_post_p & 2); } #endif //COLLECTIBLE_CLASS uint8_t* get_plug_address() { return first; } BOOL has_pre_plug_info() { return saved_pre_p; } BOOL has_post_plug_info() { return saved_post_p; } gap_reloc_pair* get_pre_plug_reloc_info() { return &saved_pre_plug_reloc; } gap_reloc_pair* get_post_plug_reloc_info() { return &saved_post_plug_reloc; } void set_pre_plug_info_reloc_start (uint8_t* reloc) { saved_pre_plug_info_reloc_start = reloc; } uint8_t* get_post_plug_info_start() { return saved_post_plug_info_start; } // We need to temporarily recover the shortened plugs for compact phase so we can // copy over the whole plug and their related info (mark bits/cards). 
But we will // need to set the artificial gap back so compact phase can keep reading the plug info. // We also need to recover the saved info because we'll need to recover it later. // // So we would call swap_p*_plug_and_saved once to recover the object info; then call // it again to recover the artificial gap. void swap_pre_plug_and_saved() { gap_reloc_pair temp; memcpy (&temp, (first - sizeof (plug_and_gap)), sizeof (temp)); memcpy ((first - sizeof (plug_and_gap)), &saved_pre_plug_reloc, sizeof (saved_pre_plug_reloc)); saved_pre_plug_reloc = temp; } void swap_post_plug_and_saved() { gap_reloc_pair temp; memcpy (&temp, saved_post_plug_info_start, sizeof (temp)); memcpy (saved_post_plug_info_start, &saved_post_plug_reloc, sizeof (saved_post_plug_reloc)); saved_post_plug_reloc = temp; } void swap_pre_plug_and_saved_for_profiler() { gap_reloc_pair temp; memcpy (&temp, (first - sizeof (plug_and_gap)), sizeof (temp)); memcpy ((first - sizeof (plug_and_gap)), &saved_pre_plug, sizeof (saved_pre_plug)); saved_pre_plug = temp; } void swap_post_plug_and_saved_for_profiler() { gap_reloc_pair temp; memcpy (&temp, saved_post_plug_info_start, sizeof (temp)); memcpy (saved_post_plug_info_start, &saved_post_plug, sizeof (saved_post_plug)); saved_post_plug = temp; } // We should think about whether it's really necessary to have to copy back the pre plug // info since it was already copied during compacting plugs. But if a plug doesn't move // by >= 3 ptr size (the size of gap_reloc_pair), it means we'd have to recover pre plug info. size_t recover_plug_info() { // We need to calculate the size for sweep case in order to correctly record the // free_obj_space - sweep would've made these artifical gaps into free objects and // we would need to deduct the size because now we are writing into those free objects. 
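        // --- Illustrative note (added commentary, not part of the original source) ---
        // For example, in a sweeping (non-compacting) GC where both a pre and a post plug
        // were saved, the two memcpy calls below rewrite the artificial gaps with real
        // object data and the function returns 2 * sizeof (gap_reloc_pair), which the
        // caller can deduct from the free_obj_space accounted for those gaps.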
size_t recovered_sweep_size = 0; if (saved_pre_p) { if (gc_heap::settings.compaction) { dprintf (3, ("%Ix: REC Pre: %Ix-%Ix", first, &saved_pre_plug_reloc, saved_pre_plug_info_reloc_start)); memcpy (saved_pre_plug_info_reloc_start, &saved_pre_plug_reloc, sizeof (saved_pre_plug_reloc)); } else { dprintf (3, ("%Ix: REC Pre: %Ix-%Ix", first, &saved_pre_plug, (first - sizeof (plug_and_gap)))); memcpy ((first - sizeof (plug_and_gap)), &saved_pre_plug, sizeof (saved_pre_plug)); recovered_sweep_size += sizeof (saved_pre_plug); } } if (saved_post_p) { if (gc_heap::settings.compaction) { dprintf (3, ("%Ix: REC Post: %Ix-%Ix", first, &saved_post_plug_reloc, saved_post_plug_info_start)); memcpy (saved_post_plug_info_start, &saved_post_plug_reloc, sizeof (saved_post_plug_reloc)); } else { dprintf (3, ("%Ix: REC Post: %Ix-%Ix", first, &saved_post_plug, saved_post_plug_info_start)); memcpy (saved_post_plug_info_start, &saved_post_plug, sizeof (saved_post_plug)); recovered_sweep_size += sizeof (saved_post_plug); } } return recovered_sweep_size; } }; void gc_mechanisms::init_mechanisms() { condemned_generation = 0; promotion = FALSE;//TRUE; compaction = TRUE; #ifdef FEATURE_LOH_COMPACTION loh_compaction = gc_heap::loh_compaction_requested(); #else loh_compaction = FALSE; #endif //FEATURE_LOH_COMPACTION heap_expansion = FALSE; concurrent = FALSE; demotion = FALSE; elevation_reduced = FALSE; found_finalizers = FALSE; #ifdef BACKGROUND_GC background_p = gc_heap::background_running_p() != FALSE; #endif //BACKGROUND_GC entry_memory_load = 0; entry_available_physical_mem = 0; exit_memory_load = 0; #ifdef STRESS_HEAP stress_induced = FALSE; #endif // STRESS_HEAP } void gc_mechanisms::first_init() { gc_index = 0; gen0_reduction_count = 0; should_lock_elevation = FALSE; elevation_locked_count = 0; reason = reason_empty; #ifdef BACKGROUND_GC pause_mode = gc_heap::gc_can_use_concurrent ? pause_interactive : pause_batch; #ifdef _DEBUG int debug_pause_mode = static_cast<int>(GCConfig::GetLatencyMode()); if (debug_pause_mode >= 0) { assert (debug_pause_mode <= pause_sustained_low_latency); pause_mode = (gc_pause_mode)debug_pause_mode; } #endif //_DEBUG #else //BACKGROUND_GC pause_mode = pause_batch; #endif //BACKGROUND_GC init_mechanisms(); } void gc_mechanisms::record (gc_history_global* history) { #ifdef MULTIPLE_HEAPS history->num_heaps = gc_heap::n_heaps; #else history->num_heaps = 1; #endif //MULTIPLE_HEAPS history->condemned_generation = condemned_generation; history->gen0_reduction_count = gen0_reduction_count; history->reason = reason; history->pause_mode = (int)pause_mode; history->mem_pressure = entry_memory_load; history->global_mechanisms_p = 0; // start setting the boolean values. if (concurrent) history->set_mechanism_p (global_concurrent); if (compaction) history->set_mechanism_p (global_compaction); if (promotion) history->set_mechanism_p (global_promotion); if (demotion) history->set_mechanism_p (global_demotion); if (card_bundles) history->set_mechanism_p (global_card_bundles); if (elevation_reduced) history->set_mechanism_p (global_elevation); } /********************************** called at the beginning of GC to fix the allocated size to what is really allocated, or to turn the free area into an unused object It needs to be called after all of the other allocation contexts have been fixed since it relies on alloc_allocated. 
********************************/ //for_gc_p indicates that the work is being done for GC, //as opposed to concurrent heap verification void gc_heap::fix_youngest_allocation_area() { // The gen 0 alloc context is never used for allocation in the allocator path. It's // still used in the allocation path during GCs. assert (generation_allocation_pointer (youngest_generation) == nullptr); assert (generation_allocation_limit (youngest_generation) == nullptr); heap_segment_allocated (ephemeral_heap_segment) = alloc_allocated; assert (heap_segment_mem (ephemeral_heap_segment) <= heap_segment_allocated (ephemeral_heap_segment)); assert (heap_segment_allocated (ephemeral_heap_segment) <= heap_segment_reserved (ephemeral_heap_segment)); } //for_gc_p indicates that the work is being done for GC, //as opposed to concurrent heap verification void gc_heap::fix_allocation_context (alloc_context* acontext, BOOL for_gc_p, BOOL record_ac_p) { dprintf (3, ("Fixing allocation context %Ix: ptr: %Ix, limit: %Ix", (size_t)acontext, (size_t)acontext->alloc_ptr, (size_t)acontext->alloc_limit)); if (acontext->alloc_ptr == 0) { return; } int align_const = get_alignment_constant (TRUE); #ifdef USE_REGIONS bool is_ephemeral_heap_segment = in_range_for_segment (acontext->alloc_limit, ephemeral_heap_segment); #else // USE_REGIONS bool is_ephemeral_heap_segment = true; #endif // USE_REGIONS if ((!is_ephemeral_heap_segment) || ((size_t)(alloc_allocated - acontext->alloc_limit) > Align (min_obj_size, align_const)) || !for_gc_p) { uint8_t* point = acontext->alloc_ptr; size_t size = (acontext->alloc_limit - acontext->alloc_ptr); // the allocation area was from the free list // it was shortened by Align (min_obj_size) to make room for // at least the shortest unused object size += Align (min_obj_size, align_const); assert ((size >= Align (min_obj_size))); dprintf(3,("Making unused area [%Ix, %Ix[", (size_t)point, (size_t)point + size )); make_unused_array (point, size); if (for_gc_p) { generation_free_obj_space (generation_of (0)) += size; if (record_ac_p) alloc_contexts_used ++; } } else if (for_gc_p) { assert (is_ephemeral_heap_segment); alloc_allocated = acontext->alloc_ptr; assert (heap_segment_allocated (ephemeral_heap_segment) <= heap_segment_committed (ephemeral_heap_segment)); if (record_ac_p) alloc_contexts_used ++; } if (for_gc_p) { // We need to update the alloc_bytes to reflect the portion that we have not used acontext->alloc_bytes -= (acontext->alloc_limit - acontext->alloc_ptr); total_alloc_bytes_soh -= (acontext->alloc_limit - acontext->alloc_ptr); acontext->alloc_ptr = 0; acontext->alloc_limit = acontext->alloc_ptr; } } //used by the heap verification for concurrent gc. //it nulls out the words set by fix_allocation_context for heap_verification void repair_allocation (gc_alloc_context* acontext, void*) { uint8_t* point = acontext->alloc_ptr; if (point != 0) { dprintf (3, ("Clearing [%Ix, %Ix[", (size_t)acontext->alloc_ptr, (size_t)acontext->alloc_limit+Align(min_obj_size))); memclr (acontext->alloc_ptr - plug_skew, (acontext->alloc_limit - acontext->alloc_ptr)+Align (min_obj_size)); } } void void_allocation (gc_alloc_context* acontext, void*) { uint8_t* point = acontext->alloc_ptr; if (point != 0) { dprintf (3, ("Void [%Ix, %Ix[", (size_t)acontext->alloc_ptr, (size_t)acontext->alloc_limit+Align(min_obj_size))); acontext->alloc_ptr = 0; acontext->alloc_limit = acontext->alloc_ptr; } } void gc_heap::repair_allocation_contexts (BOOL repair_p) { GCToEEInterface::GcEnumAllocContexts (repair_p ? 
repair_allocation : void_allocation, NULL); } struct fix_alloc_context_args { BOOL for_gc_p; void* heap; }; void fix_alloc_context (gc_alloc_context* acontext, void* param) { fix_alloc_context_args* args = (fix_alloc_context_args*)param; g_theGCHeap->FixAllocContext(acontext, (void*)(size_t)(args->for_gc_p), args->heap); } void gc_heap::fix_allocation_contexts (BOOL for_gc_p) { fix_alloc_context_args args; args.for_gc_p = for_gc_p; args.heap = __this; GCToEEInterface::GcEnumAllocContexts(fix_alloc_context, &args); fix_youngest_allocation_area(); } void gc_heap::fix_older_allocation_area (generation* older_gen) { heap_segment* older_gen_seg = generation_allocation_segment (older_gen); if (generation_allocation_limit (older_gen) != heap_segment_plan_allocated (older_gen_seg)) { uint8_t* point = generation_allocation_pointer (older_gen); size_t size = (generation_allocation_limit (older_gen) - generation_allocation_pointer (older_gen)); if (size != 0) { assert ((size >= Align (min_obj_size))); dprintf(3,("Making unused area [%Ix, %Ix[", (size_t)point, (size_t)point+size)); make_unused_array (point, size); if (size >= min_free_list) { generation_allocator (older_gen)->thread_item_front (point, size); add_gen_free (older_gen->gen_num, size); generation_free_list_space (older_gen) += size; } else { generation_free_obj_space (older_gen) += size; } } } else { assert (older_gen_seg != ephemeral_heap_segment); heap_segment_plan_allocated (older_gen_seg) = generation_allocation_pointer (older_gen); generation_allocation_limit (older_gen) = generation_allocation_pointer (older_gen); } generation_allocation_pointer (older_gen) = 0; generation_allocation_limit (older_gen) = 0; } void gc_heap::set_allocation_heap_segment (generation* gen) { #ifdef USE_REGIONS heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); dprintf (REGIONS_LOG, ("set gen%d alloc seg to start seg %Ix", gen->gen_num, heap_segment_mem (seg))); #else uint8_t* p = generation_allocation_start (gen); assert (p); heap_segment* seg = generation_allocation_segment (gen); if (in_range_for_segment (p, seg)) return; // try ephemeral heap segment in case of heap expansion seg = ephemeral_heap_segment; if (!in_range_for_segment (p, seg)) { seg = heap_segment_rw (generation_start_segment (gen)); PREFIX_ASSUME(seg != NULL); while (!in_range_for_segment (p, seg)) { seg = heap_segment_next_rw (seg); PREFIX_ASSUME(seg != NULL); } } #endif //USE_REGIONS generation_allocation_segment (gen) = seg; } void gc_heap::reset_allocation_pointers (generation* gen, uint8_t* start) { assert (start); assert (Align ((size_t)start) == (size_t)start); #ifndef USE_REGIONS generation_allocation_start (gen) = start; #endif //!USE_REGIONS generation_allocation_pointer (gen) = 0;//start + Align (min_obj_size); generation_allocation_limit (gen) = 0;//generation_allocation_pointer (gen); set_allocation_heap_segment (gen); } bool gc_heap::new_allocation_allowed (int gen_number) { if (dd_new_allocation (dynamic_data_of (gen_number)) < 0) { if (gen_number != 0) { // For UOH we will give it more budget before we try a GC. 
if (settings.concurrent) { dynamic_data* dd2 = dynamic_data_of (gen_number); if (dd_new_allocation (dd2) <= (ptrdiff_t)(-2 * dd_desired_allocation (dd2))) { return TRUE; } } } return FALSE; } #ifndef MULTIPLE_HEAPS else if ((settings.pause_mode != pause_no_gc) && (gen_number == 0)) { dynamic_data* dd0 = dynamic_data_of (0); dprintf (3, ("evaluating, running amount %Id - new %Id = %Id", allocation_running_amount, dd_new_allocation (dd0), (allocation_running_amount - dd_new_allocation (dd0)))); if ((allocation_running_amount - dd_new_allocation (dd0)) > dd_min_size (dd0)) { uint32_t ctime = GCToOSInterface::GetLowPrecisionTimeStamp(); if ((ctime - allocation_running_time) > 1000) { dprintf (2, (">1s since last gen0 gc")); return FALSE; } else { allocation_running_amount = dd_new_allocation (dd0); } } } #endif //MULTIPLE_HEAPS return TRUE; } inline ptrdiff_t gc_heap::get_desired_allocation (int gen_number) { return dd_desired_allocation (dynamic_data_of (gen_number)); } inline ptrdiff_t gc_heap::get_new_allocation (int gen_number) { return dd_new_allocation (dynamic_data_of (gen_number)); } //return the amount allocated so far in gen_number inline ptrdiff_t gc_heap::get_allocation (int gen_number) { dynamic_data* dd = dynamic_data_of (gen_number); return dd_desired_allocation (dd) - dd_new_allocation (dd); } inline BOOL grow_mark_stack (mark*& m, size_t& len, size_t init_len) { size_t new_size = max (init_len, 2*len); mark* tmp = new (nothrow) mark [new_size]; if (tmp) { memcpy (tmp, m, len * sizeof (mark)); delete m; m = tmp; len = new_size; return TRUE; } else { dprintf (1, ("Failed to allocate %Id bytes for mark stack", (len * sizeof (mark)))); return FALSE; } } inline uint8_t* pinned_plug (mark* m) { return m->first; } inline size_t& pinned_len (mark* m) { return m->len; } inline void set_new_pin_info (mark* m, uint8_t* pin_free_space_start) { m->len = pinned_plug (m) - pin_free_space_start; #ifdef SHORT_PLUGS m->allocation_context_start_region = pin_free_space_start; #endif //SHORT_PLUGS } #ifdef SHORT_PLUGS inline uint8_t*& pin_allocation_context_start_region (mark* m) { return m->allocation_context_start_region; } uint8_t* get_plug_start_in_saved (uint8_t* old_loc, mark* pinned_plug_entry) { uint8_t* saved_pre_plug_info = (uint8_t*)(pinned_plug_entry->get_pre_plug_reloc_info()); uint8_t* plug_start_in_saved = saved_pre_plug_info + (old_loc - (pinned_plug (pinned_plug_entry) - sizeof (plug_and_gap))); //dprintf (1, ("detected a very short plug: %Ix before PP %Ix, pad %Ix", // old_loc, pinned_plug (pinned_plug_entry), plug_start_in_saved)); dprintf (1, ("EP: %Ix(%Ix), %Ix", old_loc, pinned_plug (pinned_plug_entry), plug_start_in_saved)); return plug_start_in_saved; } inline void set_padding_in_expand (uint8_t* old_loc, BOOL set_padding_on_saved_p, mark* pinned_plug_entry) { if (set_padding_on_saved_p) { set_plug_padded (get_plug_start_in_saved (old_loc, pinned_plug_entry)); } else { set_plug_padded (old_loc); } } inline void clear_padding_in_expand (uint8_t* old_loc, BOOL set_padding_on_saved_p, mark* pinned_plug_entry) { if (set_padding_on_saved_p) { clear_plug_padded (get_plug_start_in_saved (old_loc, pinned_plug_entry)); } else { clear_plug_padded (old_loc); } } #endif //SHORT_PLUGS void gc_heap::reset_pinned_queue() { mark_stack_tos = 0; mark_stack_bos = 0; } void gc_heap::reset_pinned_queue_bos() { mark_stack_bos = 0; } // last_pinned_plug is only for asserting purpose. 
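// --- Illustrative note (added commentary, not part of the original source) ---
// The pinned plug "queue" used below is just mark_stack_array with two indices:
// mark_stack_bos (bottom/oldest entry) and mark_stack_tos (top/next free slot).
// set_pinned_info pushes by writing mark_stack_array[mark_stack_tos] and bumping tos;
// deque_pinned_plug pops the oldest entry by bumping bos; the queue is empty when
// bos == tos (see pinned_plug_que_empty_p).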
void gc_heap::merge_with_last_pinned_plug (uint8_t* last_pinned_plug, size_t plug_size) { if (last_pinned_plug) { mark& last_m = mark_stack_array[mark_stack_tos - 1]; assert (last_pinned_plug == last_m.first); if (last_m.saved_post_p) { last_m.saved_post_p = FALSE; dprintf (3, ("setting last plug %Ix post to false", last_m.first)); // We need to recover what the gap has overwritten. memcpy ((last_m.first + last_m.len - sizeof (plug_and_gap)), &(last_m.saved_post_plug), sizeof (gap_reloc_pair)); } last_m.len += plug_size; dprintf (3, ("recovered the last part of plug %Ix, setting its plug size to %Ix", last_m.first, last_m.len)); } } void gc_heap::set_allocator_next_pin (generation* gen) { dprintf (3, ("SANP: gen%d, ptr; %Ix, limit: %Ix", gen->gen_num, generation_allocation_pointer (gen), generation_allocation_limit (gen))); if (!(pinned_plug_que_empty_p())) { mark* oldest_entry = oldest_pin(); uint8_t* plug = pinned_plug (oldest_entry); if ((plug >= generation_allocation_pointer (gen)) && (plug < generation_allocation_limit (gen))) { #ifdef USE_REGIONS assert (region_of (generation_allocation_pointer (gen)) == region_of (generation_allocation_limit (gen) - 1)); #endif //USE_REGIONS generation_allocation_limit (gen) = pinned_plug (oldest_entry); dprintf (3, ("SANP: get next pin free space in gen%d for alloc: %Ix->%Ix(%Id)", gen->gen_num, generation_allocation_pointer (gen), generation_allocation_limit (gen), (generation_allocation_limit (gen) - generation_allocation_pointer (gen)))); } else assert (!((plug < generation_allocation_pointer (gen)) && (plug >= heap_segment_mem (generation_allocation_segment (gen))))); } } // After we set the info, we increase tos. void gc_heap::set_pinned_info (uint8_t* last_pinned_plug, size_t plug_len, generation* gen) { #ifndef _DEBUG UNREFERENCED_PARAMETER(last_pinned_plug); #endif //_DEBUG mark& m = mark_stack_array[mark_stack_tos]; assert (m.first == last_pinned_plug); m.len = plug_len; mark_stack_tos++; assert (gen != 0); // Why are we checking here? gen is never 0. if (gen != 0) { set_allocator_next_pin (gen); } } size_t gc_heap::deque_pinned_plug () { size_t m = mark_stack_bos; dprintf (3, ("deque: %Id->%Ix", mark_stack_bos, pinned_plug (pinned_plug_of (m)))); mark_stack_bos++; return m; } inline mark* gc_heap::pinned_plug_of (size_t bos) { return &mark_stack_array [ bos ]; } inline mark* gc_heap::oldest_pin () { return pinned_plug_of (mark_stack_bos); } inline BOOL gc_heap::pinned_plug_que_empty_p () { return (mark_stack_bos == mark_stack_tos); } inline mark* gc_heap::before_oldest_pin() { if (mark_stack_bos >= 1) return pinned_plug_of (mark_stack_bos-1); else return 0; } inline BOOL gc_heap::ephemeral_pointer_p (uint8_t* o) { #ifdef USE_REGIONS int gen_num = object_gennum ((uint8_t*)o); assert (gen_num >= 0); return (gen_num < max_generation); #else return ((o >= ephemeral_low) && (o < ephemeral_high)); #endif //USE_REGIONS } #ifdef USE_REGIONS // This assumes o is guaranteed to be in a region. inline bool gc_heap::is_in_condemned_gc (uint8_t* o) { assert ((o >= g_gc_lowest_address) && (o < g_gc_highest_address)); int condemned_gen = settings.condemned_generation; if (condemned_gen < max_generation) { int gen = get_region_gen_num (o); if (gen > condemned_gen) { return false; } } return true; } // REGIONS TODO - // This method can be called by GCHeap::Promote/Relocate which means // it could be in the heap range but not actually in a valid region. // This would return true but find_object will return 0. 
But this // seems counter-intuitive so we should consider a better implementation. inline bool gc_heap::is_in_condemned (uint8_t* o) { if ((o >= g_gc_lowest_address) && (o < g_gc_highest_address)) return is_in_condemned_gc (o); else return false; } inline bool gc_heap::should_check_brick_for_reloc (uint8_t* o) { assert ((o >= g_gc_lowest_address) && (o < g_gc_highest_address)); int condemned_gen = settings.condemned_generation; if (condemned_gen < max_generation) { heap_segment* region = region_of (o); int gen = get_region_gen_num (region); if ((gen > condemned_gen) || (heap_segment_swept_in_plan (region))) { if (heap_segment_swept_in_plan (region)) { dprintf (4444, ("-Rsip %Ix", o)); } return false; } } else if (heap_segment_swept_in_plan (region_of (o))) { return false; } return true; } #endif //USE_REGIONS #ifdef MH_SC_MARK inline int& gc_heap::mark_stack_busy() { return g_mark_stack_busy [(heap_number+2)*HS_CACHE_LINE_SIZE/sizeof(int)]; } #endif //MH_SC_MARK void gc_heap::make_mark_stack (mark* arr) { reset_pinned_queue(); mark_stack_array = arr; mark_stack_array_length = MARK_STACK_INITIAL_LENGTH; #ifdef MH_SC_MARK mark_stack_busy() = 0; #endif //MH_SC_MARK } #ifdef BACKGROUND_GC inline size_t& gc_heap::bpromoted_bytes(int thread) { #ifdef MULTIPLE_HEAPS return g_bpromoted [thread*16]; #else //MULTIPLE_HEAPS UNREFERENCED_PARAMETER(thread); return g_bpromoted; #endif //MULTIPLE_HEAPS } void gc_heap::make_background_mark_stack (uint8_t** arr) { background_mark_stack_array = arr; background_mark_stack_array_length = MARK_STACK_INITIAL_LENGTH; background_mark_stack_tos = arr; } void gc_heap::make_c_mark_list (uint8_t** arr) { c_mark_list = arr; c_mark_list_index = 0; c_mark_list_length = 1 + (OS_PAGE_SIZE / MIN_OBJECT_SIZE); } #endif //BACKGROUND_GC #ifdef CARD_BUNDLE // The card bundle keeps track of groups of card words. static const size_t card_bundle_word_width = 32; // How do we express the fact that 32 bits (card_word_width) is one uint32_t? 
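// --- Illustrative note (added commentary, not part of the original source) ---
// A worked example of the definition that follows, assuming GC_PAGE_SIZE is 0x1000 (4k):
//   card_bundle_size = 4096 / (sizeof(uint32_t) * 32) = 4096 / 128 = 32
// i.e. one bundle bit covers 32 card words (128 bytes of card table), so a full 32-bit
// bundle word covers one 4k page of the card table.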
static const size_t card_bundle_size = (size_t)(GC_PAGE_SIZE / (sizeof(uint32_t)*card_bundle_word_width)); inline size_t card_bundle_word (size_t cardb) { return cardb / card_bundle_word_width; } inline uint32_t card_bundle_bit (size_t cardb) { return (uint32_t)(cardb % card_bundle_word_width); } size_t align_cardw_on_bundle (size_t cardw) { return ((size_t)(cardw + card_bundle_size - 1) & ~(card_bundle_size - 1 )); } // Get the card bundle representing a card word size_t cardw_card_bundle (size_t cardw) { return cardw / card_bundle_size; } // Get the first card word in a card bundle size_t card_bundle_cardw (size_t cardb) { return cardb * card_bundle_size; } // Clear the specified card bundle void gc_heap::card_bundle_clear (size_t cardb) { uint32_t bit = (uint32_t)(1 << card_bundle_bit (cardb)); uint32_t* bundle = &card_bundle_table[card_bundle_word (cardb)]; #ifdef MULTIPLE_HEAPS // card bundles may straddle segments and heaps, thus bits may be cleared concurrently if ((*bundle & bit) != 0) { Interlocked::And (bundle, ~bit); } #else *bundle &= ~bit; #endif // check for races assert ((*bundle & bit) == 0); dprintf (2, ("Cleared card bundle %Ix [%Ix, %Ix[", cardb, (size_t)card_bundle_cardw (cardb), (size_t)card_bundle_cardw (cardb+1))); } inline void set_bundle_bits (uint32_t* bundle, uint32_t bits) { #ifdef MULTIPLE_HEAPS // card bundles may straddle segments and heaps, thus bits may be set concurrently if ((*bundle & bits) != bits) { Interlocked::Or (bundle, bits); } #else *bundle |= bits; #endif // check for races assert ((*bundle & bits) == bits); } void gc_heap::card_bundle_set (size_t cardb) { uint32_t bits = (1 << card_bundle_bit (cardb)); set_bundle_bits (&card_bundle_table [card_bundle_word (cardb)], bits); } // Set the card bundle bits between start_cardb and end_cardb void gc_heap::card_bundles_set (size_t start_cardb, size_t end_cardb) { if (start_cardb == end_cardb) { card_bundle_set(start_cardb); return; } size_t start_word = card_bundle_word (start_cardb); size_t end_word = card_bundle_word (end_cardb); if (start_word < end_word) { // Set the partial words uint32_t bits = highbits (~0u, card_bundle_bit (start_cardb)); set_bundle_bits (&card_bundle_table [start_word], bits); if (card_bundle_bit (end_cardb)) { bits = lowbits (~0u, card_bundle_bit (end_cardb)); set_bundle_bits (&card_bundle_table [end_word], bits); } // Set the full words for (size_t i = start_word + 1; i < end_word; i++) { card_bundle_table [i] = ~0u; } } else { uint32_t bits = (highbits (~0u, card_bundle_bit (start_cardb)) & lowbits (~0u, card_bundle_bit (end_cardb))); set_bundle_bits (&card_bundle_table [start_word], bits); } } // Indicates whether the specified bundle is set. 
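// --- Illustrative note (added commentary, not part of the original source) ---
// card_bundle_word/card_bundle_bit above split a bundle index the usual bitmap way:
// with card_bundle_word_width == 32, bundle 37 lives in card_bundle_table[1], bit 5
// (37 / 32 == 1, 37 % 32 == 5), which is what card_bundle_set_p below tests.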
BOOL gc_heap::card_bundle_set_p (size_t cardb)
{
    return (card_bundle_table[card_bundle_word(cardb)] & (1 << card_bundle_bit (cardb)));
}

// Returns the size (in bytes) of a card bundle representing the region from 'from' to 'end'
size_t size_card_bundle_of (uint8_t* from, uint8_t* end)
{
    // Number of heap bytes represented by a card bundle word
    size_t cbw_span = card_size * card_word_width * card_bundle_size * card_bundle_word_width;

    // Align the start of the region down
    from = (uint8_t*)((size_t)from & ~(cbw_span - 1));

    // Align the end of the region up
    end = (uint8_t*)((size_t)(end + (cbw_span - 1)) & ~(cbw_span - 1));

    // Make sure they're really aligned
    assert (((size_t)from & (cbw_span - 1)) == 0);
    assert (((size_t)end & (cbw_span - 1)) == 0);

    return ((end - from) / cbw_span) * sizeof (uint32_t);
}

// Takes a pointer to a card bundle table and an address, and returns a pointer that represents
// where a theoretical card bundle table that represents every address (starting from 0) would
// start if the bundle word representing the address were to be located at the pointer passed in.
// The returned 'translated' pointer makes it convenient/fast to calculate where the card bundle
// for a given address is using a simple shift operation on the address.
uint32_t* translate_card_bundle_table (uint32_t* cb, uint8_t* lowest_address)
{
    // The number of bytes of heap memory represented by a card bundle word
    const size_t heap_bytes_for_bundle_word = card_size * card_word_width * card_bundle_size * card_bundle_word_width;

    // Each card bundle word is 32 bits
    return (uint32_t*)((uint8_t*)cb - (((size_t)lowest_address / heap_bytes_for_bundle_word) * sizeof (uint32_t)));
}

void gc_heap::enable_card_bundles ()
{
    if (can_use_write_watch_for_card_table() && (!card_bundles_enabled()))
    {
        dprintf (1, ("Enabling card bundles"));

        // We initially set all of the card bundles
        card_bundles_set (cardw_card_bundle (card_word (card_of (lowest_address))),
                          cardw_card_bundle (align_cardw_on_bundle (card_word (card_of (highest_address)))));
        settings.card_bundles = TRUE;
    }
}

BOOL gc_heap::card_bundles_enabled ()
{
    return settings.card_bundles;
}

#endif // CARD_BUNDLE

#if defined (HOST_64BIT)
#define brick_size ((size_t)4096)
#else
#define brick_size ((size_t)2048)
#endif //HOST_64BIT

inline
size_t gc_heap::brick_of (uint8_t* add)
{
    return (size_t)(add - lowest_address) / brick_size;
}

inline
uint8_t* gc_heap::brick_address (size_t brick)
{
    return lowest_address + (brick_size * brick);
}

void gc_heap::clear_brick_table (uint8_t* from, uint8_t* end)
{
    size_t from_brick = brick_of (from);
    size_t end_brick = brick_of (end);
    memset (&brick_table[from_brick], 0, sizeof(brick_table[from_brick])*(end_brick-from_brick));
}

//codes for the brick entries:
//entry == 0 -> not assigned
//entry >0 offset is entry-1
//entry <0 jump back entry bricks
inline
void gc_heap::set_brick (size_t index, ptrdiff_t val)
{
    if (val < -32767)
    {
        val = -32767;
    }
    assert (val < 32767);
    if (val >= 0)
        brick_table [index] = (short)val+1;
    else
        brick_table [index] = (short)val;
    dprintf (3, ("set brick[%Ix] to %d\n", index, (short)val));
}

inline
int gc_heap::get_brick_entry (size_t index)
{
#ifdef MULTIPLE_HEAPS
    return VolatileLoadWithoutBarrier(&brick_table [index]);
#else
    return brick_table[index];
#endif
}

inline
uint8_t* align_on_brick (uint8_t* add)
{
    return (uint8_t*)((size_t)(add + brick_size - 1) & ~(brick_size - 1));
}

inline
uint8_t* align_lower_brick (uint8_t* add)
{
    return (uint8_t*)(((size_t)add) & ~(brick_size - 1));
}

size_t size_brick_of (uint8_t* from, uint8_t* end)
{
    assert (((size_t)from & (brick_size-1)) == 0);
    assert (((size_t)end & (brick_size-1)) == 0);

    return ((end - from) / brick_size) * sizeof (short);
}

inline
uint8_t* gc_heap::card_address (size_t card)
{
    return (uint8_t*) (card_size * card);
}

inline
size_t gc_heap::card_of ( uint8_t* object)
{
    return (size_t)(object) / card_size;
}

inline
uint8_t* align_on_card (uint8_t* add)
{
    return (uint8_t*)((size_t)(add + card_size - 1) & ~(card_size - 1 ));
}

inline
uint8_t* align_on_card_word (uint8_t* add)
{
    return (uint8_t*) ((size_t)(add + (card_size*card_word_width)-1) & ~(card_size*card_word_width - 1));
}

inline
uint8_t* align_lower_card (uint8_t* add)
{
    return (uint8_t*)((size_t)add & ~(card_size-1));
}

inline
void gc_heap::clear_card (size_t card)
{
    card_table [card_word (card)] =
        (card_table [card_word (card)] & ~(1 << card_bit (card)));
    dprintf (3,("Cleared card %Ix [%Ix, %Ix[", card,
              (size_t)card_address (card), (size_t)card_address (card+1)));
}

inline
void gc_heap::set_card (size_t card)
{
    size_t word = card_word (card);
    card_table[word] = (card_table [word] | (1 << card_bit (card)));

#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
    // Also set the card bundle that corresponds to the card
    size_t bundle_to_set = cardw_card_bundle(word);

    card_bundle_set(bundle_to_set);

    dprintf (3,("Set card %Ix [%Ix, %Ix[ and bundle %Ix", card,
              (size_t)card_address (card), (size_t)card_address (card+1), bundle_to_set));
#endif
}

inline
BOOL gc_heap::card_set_p (size_t card)
{
    return ( card_table [ card_word (card) ] & (1 << card_bit (card)));
}

// Returns the number of DWORDs in the card table that cover the
// range of addresses [from, end[.
size_t count_card_of (uint8_t* from, uint8_t* end)
{
    return card_word (gcard_of (end - 1)) - card_word (gcard_of (from)) + 1;
}

// Returns the number of bytes to allocate for a card table
// that covers the range of addresses [from, end[.
size_t size_card_of (uint8_t* from, uint8_t* end)
{
    return count_card_of (from, end) * sizeof(uint32_t);
}

// We don't store seg_mapping_table in card_table_info because there's only always one view.
class card_table_info
{
public:
    unsigned  recount;
    uint8_t*  lowest_address;
    uint8_t*  highest_address;
    short*    brick_table;

#ifdef CARD_BUNDLE
    uint32_t* card_bundle_table;
#endif //CARD_BUNDLE

    // mark_array is always at the end of the data structure because we
    // want to be able to make one commit call for everything before it.
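    // --- Illustrative note (added commentary, not part of the original source) ---
    // Callers never hold a card_table_info* directly: the uint32_t* card table pointer
    // points just past this header, and the accessors below (card_table_refcount,
    // card_table_lowest_address, ...) recover a field by subtracting
    // sizeof (card_table_info) from that pointer. That is why recount needs to stay the
    // first member.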
#ifdef BACKGROUND_GC uint32_t* mark_array; #endif //BACKGROUND_GC size_t size; uint32_t* next_card_table; }; //These are accessors on untranslated cardtable inline unsigned& card_table_refcount (uint32_t* c_table) { return *(unsigned*)((char*)c_table - sizeof (card_table_info)); } inline uint8_t*& card_table_lowest_address (uint32_t* c_table) { return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->lowest_address; } uint32_t* translate_card_table (uint32_t* ct) { return (uint32_t*)((uint8_t*)ct - card_word (gcard_of (card_table_lowest_address (ct))) * sizeof(uint32_t)); } inline uint8_t*& card_table_highest_address (uint32_t* c_table) { return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->highest_address; } inline short*& card_table_brick_table (uint32_t* c_table) { return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->brick_table; } #ifdef CARD_BUNDLE inline uint32_t*& card_table_card_bundle_table (uint32_t* c_table) { return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->card_bundle_table; } #endif //CARD_BUNDLE #ifdef BACKGROUND_GC inline uint32_t*& card_table_mark_array (uint32_t* c_table) { return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->mark_array; } #ifdef HOST_64BIT #define mark_bit_pitch ((size_t)16) #else #define mark_bit_pitch ((size_t)8) #endif // HOST_64BIT #define mark_word_width ((size_t)32) #define mark_word_size (mark_word_width * mark_bit_pitch) inline uint8_t* align_on_mark_bit (uint8_t* add) { return (uint8_t*)((size_t)(add + (mark_bit_pitch - 1)) & ~(mark_bit_pitch - 1)); } inline uint8_t* align_lower_mark_bit (uint8_t* add) { return (uint8_t*)((size_t)(add) & ~(mark_bit_pitch - 1)); } inline BOOL is_aligned_on_mark_word (uint8_t* add) { return ((size_t)add == ((size_t)(add) & ~(mark_word_size - 1))); } inline uint8_t* align_on_mark_word (uint8_t* add) { return (uint8_t*)((size_t)(add + mark_word_size - 1) & ~(mark_word_size - 1)); } inline uint8_t* align_lower_mark_word (uint8_t* add) { return (uint8_t*)((size_t)(add) & ~(mark_word_size - 1)); } inline size_t mark_bit_of (uint8_t* add) { return ((size_t)add / mark_bit_pitch); } inline unsigned int mark_bit_bit (size_t mark_bit) { return (unsigned int)(mark_bit % mark_word_width); } inline size_t mark_bit_word (size_t mark_bit) { return (mark_bit / mark_word_width); } inline size_t mark_word_of (uint8_t* add) { return ((size_t)add) / mark_word_size; } uint8_t* mark_word_address (size_t wd) { return (uint8_t*)(wd*mark_word_size); } uint8_t* mark_bit_address (size_t mark_bit) { return (uint8_t*)(mark_bit*mark_bit_pitch); } inline size_t mark_bit_bit_of (uint8_t* add) { return (((size_t)add / mark_bit_pitch) % mark_word_width); } inline unsigned int gc_heap::mark_array_marked(uint8_t* add) { return mark_array [mark_word_of (add)] & (1 << mark_bit_bit_of (add)); } inline BOOL gc_heap::is_mark_bit_set (uint8_t* add) { return (mark_array [mark_word_of (add)] & (1 << mark_bit_bit_of (add))); } inline void gc_heap::mark_array_set_marked (uint8_t* add) { size_t index = mark_word_of (add); uint32_t val = (1 << mark_bit_bit_of (add)); #ifdef MULTIPLE_HEAPS Interlocked::Or (&(mark_array [index]), val); #else mark_array [index] |= val; #endif } inline void gc_heap::mark_array_clear_marked (uint8_t* add) { mark_array [mark_word_of (add)] &= ~(1 << mark_bit_bit_of (add)); } size_t size_mark_array_of (uint8_t* from, uint8_t* end) { assert (((size_t)from & ((mark_word_size)-1)) == 0); assert (((size_t)end & 
            ((mark_word_size)-1)) == 0);
    return sizeof (uint32_t)*(((end - from) / mark_word_size));
}

//In order to eliminate the lowest_address in the mark array
//computations (mark_word_of, etc) mark_array is offset
// according to the lowest_address.
uint32_t* translate_mark_array (uint32_t* ma)
{
    return (uint32_t*)((uint8_t*)ma - size_mark_array_of (0, g_gc_lowest_address));
}

// from and end must be page aligned addresses.
void gc_heap::clear_mark_array (uint8_t* from, uint8_t* end, BOOL check_only/*=TRUE*/
#ifdef FEATURE_BASICFREEZE
                                , BOOL read_only/*=FALSE*/
#endif // FEATURE_BASICFREEZE
                                )
{
    if(!gc_can_use_concurrent)
        return;

#ifdef FEATURE_BASICFREEZE
    if (!read_only)
#endif // FEATURE_BASICFREEZE
    {
        assert (from == align_on_mark_word (from));
    }
    assert (end == align_on_mark_word (end));

    uint8_t* current_lowest_address = background_saved_lowest_address;
    uint8_t* current_highest_address = background_saved_highest_address;

    //there is a possibility of the addresses to be
    //outside of the covered range because of a newly allocated
    //large object segment
    if ((end <= current_highest_address) && (from >= current_lowest_address))
    {
        size_t beg_word = mark_word_of (align_on_mark_word (from));
        //align end word to make sure to cover the address
        size_t end_word = mark_word_of (align_on_mark_word (end));
        dprintf (3, ("Calling clearing mark array [%Ix, %Ix[ for addresses [%Ix, %Ix[(%s)",
                     (size_t)mark_word_address (beg_word),
                     (size_t)mark_word_address (end_word),
                     (size_t)from, (size_t)end,
                     (check_only ? "check_only" : "clear")));
        if (!check_only)
        {
            uint8_t* op = from;
            while (op < mark_word_address (beg_word))
            {
                mark_array_clear_marked (op);
                op += mark_bit_pitch;
            }

            memset (&mark_array[beg_word], 0, (end_word - beg_word)*sizeof (uint32_t));
        }
#ifdef _DEBUG
        else
        {
            //Beware, it is assumed that the mark array word straddling
            //start has been cleared before
            //verify that the array is empty.
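            // --- Illustrative note (added commentary, not part of the original source) ---
            // Geometry used by the verification below, on 64-bit (mark_bit_pitch == 16,
            // mark_word_width == 32): one uint32_t mark word covers
            // mark_word_size == 32 * 16 == 512 bytes of heap, so the loop walks one
            // mark_array entry per 512-byte chunk of [from, end).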
size_t markw = mark_word_of (align_on_mark_word (from)); size_t markw_end = mark_word_of (align_on_mark_word (end)); while (markw < markw_end) { assert (!(mark_array [markw])); markw++; } uint8_t* p = mark_word_address (markw_end); while (p < end) { assert (!(mark_array_marked (p))); p++; } } #endif //_DEBUG } } #endif //BACKGROUND_GC //These work on untranslated card tables inline uint32_t*& card_table_next (uint32_t* c_table) { return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->next_card_table; } inline size_t& card_table_size (uint32_t* c_table) { return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->size; } void own_card_table (uint32_t* c_table) { card_table_refcount (c_table) += 1; } void destroy_card_table (uint32_t* c_table); void delete_next_card_table (uint32_t* c_table) { uint32_t* n_table = card_table_next (c_table); if (n_table) { if (card_table_next (n_table)) { delete_next_card_table (n_table); } if (card_table_refcount (n_table) == 0) { destroy_card_table (n_table); card_table_next (c_table) = 0; } } } void release_card_table (uint32_t* c_table) { assert (card_table_refcount (c_table) >0); card_table_refcount (c_table) -= 1; if (card_table_refcount (c_table) == 0) { delete_next_card_table (c_table); if (card_table_next (c_table) == 0) { destroy_card_table (c_table); // sever the link from the parent if (&g_gc_card_table[card_word (gcard_of(g_gc_lowest_address))] == c_table) { g_gc_card_table = 0; #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES g_gc_card_bundle_table = 0; #endif #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP SoftwareWriteWatch::StaticClose(); #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP } else { uint32_t* p_table = &g_gc_card_table[card_word (gcard_of(g_gc_lowest_address))]; if (p_table) { while (p_table && (card_table_next (p_table) != c_table)) p_table = card_table_next (p_table); card_table_next (p_table) = 0; } } } } } void destroy_card_table (uint32_t* c_table) { // delete (uint32_t*)&card_table_refcount(c_table); GCToOSInterface::VirtualRelease (&card_table_refcount(c_table), card_table_size(c_table)); dprintf (2, ("Table Virtual Free : %Ix", (size_t)&card_table_refcount(c_table))); } void gc_heap::get_card_table_element_sizes (uint8_t* start, uint8_t* end, size_t sizes[total_bookkeeping_elements]) { memset (sizes, 0, sizeof(size_t) * total_bookkeeping_elements); sizes[card_table_element] = size_card_of (start, end); sizes[brick_table_element] = size_brick_of (start, end); #ifdef CARD_BUNDLE if (can_use_write_watch_for_card_table()) { sizes[card_bundle_table_element] = size_card_bundle_of (start, end); } #endif //CARD_BUNDLE #if defined(FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP) && defined (BACKGROUND_GC) if (gc_can_use_concurrent) { sizes[software_write_watch_table_element] = SoftwareWriteWatch::GetTableByteSize(start, end); } #endif //FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP && BACKGROUND_GC sizes[seg_mapping_table_element] = size_seg_mapping_table_of (start, end); #ifdef BACKGROUND_GC if (gc_can_use_concurrent) { sizes[mark_array_element] = size_mark_array_of (start, end); } #endif //BACKGROUND_GC } void gc_heap::get_card_table_element_layout (uint8_t* start, uint8_t* end, size_t layout[total_bookkeeping_elements + 1]) { size_t sizes[total_bookkeeping_elements]; get_card_table_element_sizes(start, end, sizes); const size_t alignment[total_bookkeeping_elements + 1] = { sizeof (uint32_t), // card_table_element sizeof (short), // brick_table_element #ifdef CARD_BUNDLE sizeof (uint32_t), // 
card_bundle_table_element #endif //CARD_BUNDLE #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP sizeof(size_t), // software_write_watch_table_element #endif //FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP sizeof (uint8_t*), // seg_mapping_table_element #ifdef BACKGROUND_GC // In order to avoid a dependency between commit_mark_array_by_range and this logic, it is easier to make sure // pages for mark array never overlaps with pages in the seg mapping table. That way commit_mark_array_by_range // will never commit a page that is already committed here for the seg mapping table. OS_PAGE_SIZE, // mark_array_element #endif //BACKGROUND_GC // commit_mark_array_by_range extends the end pointer of the commit to the next page boundary, we better make sure it // is reserved OS_PAGE_SIZE // total_bookkeeping_elements }; layout[card_table_element] = ALIGN_UP(sizeof(card_table_info), alignment[card_table_element]); for (int element = brick_table_element; element <= total_bookkeeping_elements; element++) { layout[element] = layout[element - 1] + sizes[element - 1]; if ((element != total_bookkeeping_elements) && (sizes[element] != 0)) { layout[element] = ALIGN_UP(layout[element], alignment[element]); } } } #ifdef USE_REGIONS bool gc_heap::on_used_changed (uint8_t* new_used) { if (new_used > bookkeeping_covered_committed) { bool speculative_commit_tried = false; #ifdef STRESS_REGIONS if (gc_rand::get_rand(10) > 3) { dprintf (REGIONS_LOG, ("skipping speculative commit under stress regions")); speculative_commit_tried = true; } #endif while (true) { uint8_t* new_bookkeeping_covered_committed = nullptr; if (speculative_commit_tried) { new_bookkeeping_covered_committed = new_used; } else { uint64_t committed_size = (uint64_t)(bookkeeping_covered_committed - g_gc_lowest_address); uint64_t total_size = (uint64_t)(g_gc_highest_address - g_gc_lowest_address); assert (committed_size <= total_size); assert (committed_size < (UINT64_MAX / 2)); uint64_t new_committed_size = min(committed_size * 2, total_size); assert ((UINT64_MAX - new_committed_size) > (uint64_t)g_gc_lowest_address); uint8_t* double_commit = g_gc_lowest_address + new_committed_size; new_bookkeeping_covered_committed = max(double_commit, new_used); dprintf (REGIONS_LOG, ("committed_size = %Id", committed_size)); dprintf (REGIONS_LOG, ("total_size = %Id", total_size)); dprintf (REGIONS_LOG, ("new_committed_size = %Id", new_committed_size)); dprintf (REGIONS_LOG, ("double_commit = %p", double_commit)); } dprintf (REGIONS_LOG, ("bookkeeping_covered_committed = %p", bookkeeping_covered_committed)); dprintf (REGIONS_LOG, ("new_bookkeeping_covered_committed = %p", new_bookkeeping_covered_committed)); if (inplace_commit_card_table (bookkeeping_covered_committed, new_bookkeeping_covered_committed)) { bookkeeping_covered_committed = new_bookkeeping_covered_committed; break; } else { if (new_bookkeeping_covered_committed == new_used) { dprintf (REGIONS_LOG, ("The minimal commit for the GC bookkeepping data structure failed, giving up")); return false; } dprintf (REGIONS_LOG, ("The speculative commit for the GC bookkeepping data structure failed, retry for minimal commit")); speculative_commit_tried = true; } } } return true; } bool gc_heap::inplace_commit_card_table (uint8_t* from, uint8_t* to) { dprintf (REGIONS_LOG, ("inplace_commit_card_table(%p, %p), size = %Id", from, to, to - from)); uint8_t* start = g_gc_lowest_address; uint8_t* end = g_gc_highest_address; uint8_t* commit_begins[total_bookkeeping_elements]; size_t 
commit_sizes[total_bookkeeping_elements]; size_t new_sizes[total_bookkeeping_elements]; bool initial_commit = (from == start); bool additional_commit = !initial_commit && (to > from); if (initial_commit || additional_commit) { #ifdef DEBUG size_t offsets[total_bookkeeping_elements + 1]; get_card_table_element_layout(start, end, offsets); dprintf (REGIONS_LOG, ("layout")); for (int i = card_table_element; i <= total_bookkeeping_elements; i++) { assert (offsets[i] == card_table_element_layout[i]); dprintf (REGIONS_LOG, ("%Id", card_table_element_layout[i])); } #endif get_card_table_element_sizes (start, to, new_sizes); #ifdef DEBUG dprintf (REGIONS_LOG, ("new_sizes")); for (int i = card_table_element; i < total_bookkeeping_elements; i++) { dprintf (REGIONS_LOG, ("%Id", new_sizes[i])); } if (additional_commit) { size_t current_sizes[total_bookkeeping_elements]; get_card_table_element_sizes (start, from, current_sizes); dprintf (REGIONS_LOG, ("old_sizes")); for (int i = card_table_element; i < total_bookkeeping_elements; i++) { assert (current_sizes[i] == bookkeeping_sizes[i]); dprintf (REGIONS_LOG, ("%Id", bookkeeping_sizes[i])); } } #endif for (int i = card_table_element; i <= seg_mapping_table_element; i++) { uint8_t* required_begin = nullptr; uint8_t* required_end = nullptr; uint8_t* commit_begin = nullptr; uint8_t* commit_end = nullptr; if (initial_commit) { required_begin = bookkeeping_covered_start + ((i == card_table_element) ? 0 : card_table_element_layout[i]); required_end = bookkeeping_covered_start + card_table_element_layout[i] + new_sizes[i]; commit_begin = align_lower_page(required_begin); } else { assert (additional_commit); required_begin = bookkeeping_covered_start + card_table_element_layout[i] + bookkeeping_sizes[i]; required_end = required_begin + new_sizes[i] - bookkeeping_sizes[i]; commit_begin = align_on_page(required_begin); } assert (required_begin <= required_end); commit_end = align_on_page(required_end); commit_end = min (commit_end, align_lower_page(bookkeeping_covered_start + card_table_element_layout[i + 1])); commit_begin = min (commit_begin, commit_end); assert (commit_begin <= commit_end); dprintf (REGIONS_LOG, ("required = [%p, %p), size = %Id", required_begin, required_end, required_end - required_begin)); dprintf (REGIONS_LOG, ("commit = [%p, %p), size = %Id", commit_begin, commit_end, commit_end - commit_begin)); commit_begins[i] = commit_begin; commit_sizes[i] = (size_t)(commit_end - commit_begin); } dprintf (REGIONS_LOG, ("---------------------------------------")); } else { return true; } int failed_commit = -1; for (int i = card_table_element; i <= seg_mapping_table_element; i++) { bool succeed; if (commit_sizes[i] > 0) { succeed = virtual_commit (commit_begins[i], commit_sizes[i], gc_oh_num::none); if (!succeed) { failed_commit = i; break; } } } if (failed_commit == -1) { for (int i = card_table_element; i < total_bookkeeping_elements; i++) { bookkeeping_sizes[i] = new_sizes[i]; } } else { for (int i = card_table_element; i < failed_commit; i++) { bool succeed; if (commit_sizes[i] > 0) { succeed = virtual_decommit (commit_begins[i], commit_sizes[i], gc_oh_num::none); assert (succeed); } } return false; } return true; } #endif //USE_REGIONS uint32_t* gc_heap::make_card_table (uint8_t* start, uint8_t* end) { assert (g_gc_lowest_address == start); assert (g_gc_highest_address == end); uint32_t virtual_reserve_flags = VirtualReserveFlags::None; #ifdef CARD_BUNDLE if (can_use_write_watch_for_card_table()) { #ifndef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES // 
        // If we're not manually managing the card bundles, we will need to use OS write
        // watch APIs over this region to track changes.
        virtual_reserve_flags |= VirtualReserveFlags::WriteWatch;
#endif
    }
#endif //CARD_BUNDLE

    get_card_table_element_layout(start, end, card_table_element_layout);

    size_t alloc_size = card_table_element_layout[total_bookkeeping_elements];
    uint8_t* mem = (uint8_t*)GCToOSInterface::VirtualReserve (alloc_size, 0, virtual_reserve_flags);

#ifdef USE_REGIONS
    bookkeeping_covered_start = mem;
#endif //USE_REGIONS

    if (!mem)
        return 0;

    dprintf (2, ("Init - Card table alloc for %Id bytes: [%Ix, %Ix[", alloc_size, (size_t)mem, (size_t)(mem+alloc_size)));

#ifdef USE_REGIONS
    if (!inplace_commit_card_table (g_gc_lowest_address, global_region_allocator.get_left_used_unsafe()))
    {
        dprintf (1, ("Card table commit failed"));
        GCToOSInterface::VirtualRelease (mem, alloc_size);
        return 0;
    }
    bookkeeping_covered_committed = global_region_allocator.get_left_used_unsafe();
#else
    // in case of background gc, the mark array will be committed separately (per segment).
    size_t commit_size = card_table_element_layout[seg_mapping_table_element + 1];

    if (!virtual_commit (mem, commit_size, gc_oh_num::none))
    {
        dprintf (1, ("Card table commit failed"));
        GCToOSInterface::VirtualRelease (mem, alloc_size);
        return 0;
    }
#endif //USE_REGIONS

    // initialize the ref count
    uint32_t* ct = (uint32_t*)(mem + card_table_element_layout[card_table_element]);
    card_table_refcount (ct) = 0;
    card_table_lowest_address (ct) = start;
    card_table_highest_address (ct) = end;
    card_table_brick_table (ct) = (short*)(mem + card_table_element_layout[brick_table_element]);
    card_table_size (ct) = alloc_size;
    card_table_next (ct) = 0;

#ifdef CARD_BUNDLE
    card_table_card_bundle_table (ct) = (uint32_t*)(mem + card_table_element_layout[card_bundle_table_element]);

#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
    g_gc_card_bundle_table = translate_card_bundle_table(card_table_card_bundle_table(ct), g_gc_lowest_address);
#endif
#endif //CARD_BUNDLE

#if defined(FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP) && defined (BACKGROUND_GC)
    if (gc_can_use_concurrent)
    {
        SoftwareWriteWatch::InitializeUntranslatedTable(mem + card_table_element_layout[software_write_watch_table_element], start);
    }
#endif //FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP && BACKGROUND_GC

    seg_mapping_table = (seg_mapping*)(mem + card_table_element_layout[seg_mapping_table_element]);
    seg_mapping_table = (seg_mapping*)((uint8_t*)seg_mapping_table -
                                       size_seg_mapping_table_of (0, (align_lower_segment (g_gc_lowest_address))));

#ifdef BACKGROUND_GC
    if (gc_can_use_concurrent)
        card_table_mark_array (ct) = (uint32_t*)(mem + card_table_element_layout[mark_array_element]);
    else
        card_table_mark_array (ct) = NULL;
#endif //BACKGROUND_GC

    return translate_card_table(ct);
}

void gc_heap::set_fgm_result (failure_get_memory f, size_t s, BOOL loh_p)
{
#ifdef MULTIPLE_HEAPS
    for (int hn = 0; hn < gc_heap::n_heaps; hn++)
    {
        gc_heap* hp = gc_heap::g_heaps [hn];
        hp->fgm_result.set_fgm (f, s, loh_p);
    }
#else //MULTIPLE_HEAPS
    fgm_result.set_fgm (f, s, loh_p);
#endif //MULTIPLE_HEAPS
}

//returns 0 for success, -1 otherwise
// We are doing all the decommitting here because we want to make sure we have
// enough memory to do so - if we do this during copy_brick_card_table and
// fail to decommit it would make the failure case very complicated to
// handle. This way we can waste some decommit if we call this multiple
// times before the next FGC but it's easier to handle the failure case.
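// Rough outline of grow_brick_card_tables below (an illustrative summary, not normative):
// 1. compute a new covered range [saved_g_lowest_address, saved_g_highest_address) that at
//    least doubles the old span and contains the new segment,
// 2. reserve a fresh set of bookkeeping tables for that range and commit everything except
//    the mark array (which is committed per segment when background GC needs it),
// 3. seed the new seg_mapping_table from the old one and, if a background GC is in progress,
//    commit the parts of the new mark array that cover existing segments,
// 4. publish the new globals (g_gc_card_table, g_gc_lowest/highest_address, seg_mapping_table)
//    and resize the write barrier; on any failure the new reservation is released and the old
//    tables stay in use.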
int gc_heap::grow_brick_card_tables (uint8_t* start, uint8_t* end, size_t size, heap_segment* new_seg, gc_heap* hp, BOOL uoh_p) { uint8_t* la = g_gc_lowest_address; uint8_t* ha = g_gc_highest_address; uint8_t* saved_g_lowest_address = min (start, g_gc_lowest_address); uint8_t* saved_g_highest_address = max (end, g_gc_highest_address); seg_mapping* new_seg_mapping_table = nullptr; #ifdef BACKGROUND_GC // This value is only for logging purpose - it's not necessarily exactly what we // would commit for mark array but close enough for diagnostics purpose. size_t logging_ma_commit_size = size_mark_array_of (0, (uint8_t*)size); #endif //BACKGROUND_GC // See if the address is already covered if ((la != saved_g_lowest_address ) || (ha != saved_g_highest_address)) { { //modify the highest address so the span covered //is twice the previous one. uint8_t* top = (uint8_t*)0 + Align (GCToOSInterface::GetVirtualMemoryLimit()); // On non-Windows systems, we get only an approximate value that can possibly be // slightly lower than the saved_g_highest_address. // In such case, we set the top to the saved_g_highest_address so that the // card and brick tables always cover the whole new range. if (top < saved_g_highest_address) { top = saved_g_highest_address; } size_t ps = ha-la; #ifdef HOST_64BIT if (ps > (uint64_t)200*1024*1024*1024) ps += (uint64_t)100*1024*1024*1024; else #endif // HOST_64BIT ps *= 2; if (saved_g_lowest_address < g_gc_lowest_address) { if (ps > (size_t)g_gc_lowest_address) saved_g_lowest_address = (uint8_t*)(size_t)OS_PAGE_SIZE; else { assert (((size_t)g_gc_lowest_address - ps) >= OS_PAGE_SIZE); saved_g_lowest_address = min (saved_g_lowest_address, (g_gc_lowest_address - ps)); } } if (saved_g_highest_address > g_gc_highest_address) { saved_g_highest_address = max ((saved_g_lowest_address + ps), saved_g_highest_address); if (saved_g_highest_address > top) saved_g_highest_address = top; } } dprintf (GC_TABLE_LOG, ("Growing card table [%Ix, %Ix[", (size_t)saved_g_lowest_address, (size_t)saved_g_highest_address)); bool write_barrier_updated = false; uint32_t virtual_reserve_flags = VirtualReserveFlags::None; uint32_t* saved_g_card_table = g_gc_card_table; #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES uint32_t* saved_g_card_bundle_table = g_gc_card_bundle_table; #endif get_card_table_element_layout(saved_g_lowest_address, saved_g_highest_address, card_table_element_layout); size_t cb = 0; uint32_t* ct = 0; uint32_t* translated_ct = 0; #ifdef CARD_BUNDLE if (can_use_write_watch_for_card_table()) { cb = size_card_bundle_of (saved_g_lowest_address, saved_g_highest_address); #ifndef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES // If we're not manually managing the card bundles, we will need to use OS write // watch APIs over this region to track changes. virtual_reserve_flags |= VirtualReserveFlags::WriteWatch; #endif } #endif //CARD_BUNDLE size_t alloc_size = card_table_element_layout[total_bookkeeping_elements]; uint8_t* mem = (uint8_t*)GCToOSInterface::VirtualReserve (alloc_size, 0, virtual_reserve_flags); if (!mem) { set_fgm_result (fgm_grow_table, alloc_size, uoh_p); goto fail; } dprintf (GC_TABLE_LOG, ("Table alloc for %Id bytes: [%Ix, %Ix[", alloc_size, (size_t)mem, (size_t)((uint8_t*)mem+alloc_size))); { // in case of background gc, the mark array will be committed separately (per segment). 
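// (Note: this commits every bookkeeping element up to and including the seg_mapping_table;
// the mark array, which comes after it in the layout, is left reserved-only here.)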
size_t commit_size = card_table_element_layout[seg_mapping_table_element + 1]; if (!virtual_commit (mem, commit_size, gc_oh_num::none)) { dprintf (GC_TABLE_LOG, ("Table commit failed")); set_fgm_result (fgm_commit_table, commit_size, uoh_p); goto fail; } } ct = (uint32_t*)(mem + card_table_element_layout[card_table_element]); card_table_refcount (ct) = 0; card_table_lowest_address (ct) = saved_g_lowest_address; card_table_highest_address (ct) = saved_g_highest_address; card_table_next (ct) = &g_gc_card_table[card_word (gcard_of (la))]; //clear the card table /* memclr ((uint8_t*)ct, (((saved_g_highest_address - saved_g_lowest_address)*sizeof (uint32_t) / (card_size * card_word_width)) + sizeof (uint32_t))); */ // No initialization needed, will be done in copy_brick_card card_table_brick_table (ct) = (short*)(mem + card_table_element_layout[brick_table_element]); #ifdef CARD_BUNDLE card_table_card_bundle_table (ct) = (uint32_t*)(mem + card_table_element_layout[card_bundle_table_element]); //set all bundle to look at all of the cards memset(card_table_card_bundle_table (ct), 0xFF, cb); #endif //CARD_BUNDLE new_seg_mapping_table = (seg_mapping*)(mem + card_table_element_layout[seg_mapping_table_element]); new_seg_mapping_table = (seg_mapping*)((uint8_t*)new_seg_mapping_table - size_seg_mapping_table_of (0, (align_lower_segment (saved_g_lowest_address)))); memcpy(&new_seg_mapping_table[seg_mapping_word_of(g_gc_lowest_address)], &seg_mapping_table[seg_mapping_word_of(g_gc_lowest_address)], size_seg_mapping_table_of(g_gc_lowest_address, g_gc_highest_address)); // new_seg_mapping_table gets assigned to seg_mapping_table at the bottom of this function, // not here. The reason for this is that, if we fail at mark array committing (OOM) and we've // already switched seg_mapping_table to point to the new mapping table, we'll decommit it and // run into trouble. By not assigning here, we're making sure that we will not change seg_mapping_table // if an OOM occurs. 
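// (Clarifying note, assuming the same table-biasing convention used for the card table:
// the subtraction above offsets new_seg_mapping_table so that it can be indexed with
// seg_mapping_word_of() applied to absolute addresses, with the entry for the lowest
// covered address sitting at the start of the actual allocation.)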
#ifdef BACKGROUND_GC if(gc_can_use_concurrent) card_table_mark_array (ct) = (uint32_t*)(mem + card_table_element_layout[mark_array_element]); else card_table_mark_array (ct) = NULL; #endif //BACKGROUND_GC translated_ct = translate_card_table (ct); dprintf (GC_TABLE_LOG, ("card table: %Ix(translated: %Ix), seg map: %Ix, mark array: %Ix", (size_t)ct, (size_t)translated_ct, (size_t)new_seg_mapping_table, (size_t)card_table_mark_array (ct))); #ifdef BACKGROUND_GC if (hp->is_bgc_in_progress()) { dprintf (GC_TABLE_LOG, ("new low: %Ix, new high: %Ix, latest mark array is %Ix(translate: %Ix)", saved_g_lowest_address, saved_g_highest_address, card_table_mark_array (ct), translate_mark_array (card_table_mark_array (ct)))); uint32_t* new_mark_array = (uint32_t*)((uint8_t*)card_table_mark_array (ct) - size_mark_array_of (0, saved_g_lowest_address)); if (!commit_new_mark_array_global (new_mark_array)) { dprintf (GC_TABLE_LOG, ("failed to commit portions in the mark array for existing segments")); set_fgm_result (fgm_commit_table, logging_ma_commit_size, uoh_p); goto fail; } if (!commit_mark_array_new_seg (hp, new_seg, translated_ct, saved_g_lowest_address)) { dprintf (GC_TABLE_LOG, ("failed to commit mark array for the new seg")); set_fgm_result (fgm_commit_table, logging_ma_commit_size, uoh_p); goto fail; } } else { clear_commit_flag_global(); } #endif //BACKGROUND_GC #if defined(FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP) && defined(BACKGROUND_GC) if (gc_can_use_concurrent) { // The current design of software write watch requires that the runtime is suspended during resize. Suspending // on resize is preferred because it is a far less frequent operation than GetWriteWatch() / ResetWriteWatch(). // Suspending here allows copying dirty state from the old table into the new table, and not have to merge old // table info lazily as done for card tables. // Either this thread was the thread that did the suspension which means we are suspended; or this is called // from a GC thread which means we are in a blocking GC and also suspended. bool is_runtime_suspended = GCToEEInterface::IsGCThread(); if (!is_runtime_suspended) { // Note on points where the runtime is suspended anywhere in this function. Upon an attempt to suspend the // runtime, a different thread may suspend first, causing this thread to block at the point of the suspend call. // So, at any suspend point, externally visible state needs to be consistent, as code that depends on that state // may run while this thread is blocked. This includes updates to g_gc_card_table, g_gc_lowest_address, and // g_gc_highest_address. suspend_EE(); } g_gc_card_table = translated_ct; #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES g_gc_card_bundle_table = translate_card_bundle_table(card_table_card_bundle_table(ct), saved_g_lowest_address); #endif SoftwareWriteWatch::SetResizedUntranslatedTable( mem + card_table_element_layout[software_write_watch_table_element], saved_g_lowest_address, saved_g_highest_address); seg_mapping_table = new_seg_mapping_table; // Since the runtime is already suspended, update the write barrier here as well. // This passes a bool telling whether we need to switch to the post // grow version of the write barrier. This test tells us if the new // segment was allocated at a lower address than the old, requiring // that we start doing an upper bounds check in the write barrier. 
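// For example (hypothetical addresses): if the heap previously started at 0x20000000 and the
// new segment pulled saved_g_lowest_address down to 0x10000000, then 'la != saved_g_lowest_address'
// below is true and the barrier is switched to the variant that also performs the upper bounds
// check described above.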
g_gc_lowest_address = saved_g_lowest_address; g_gc_highest_address = saved_g_highest_address; stomp_write_barrier_resize(true, la != saved_g_lowest_address); write_barrier_updated = true; if (!is_runtime_suspended) { restart_EE(); } } else #endif //FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP && BACKGROUND_GC { g_gc_card_table = translated_ct; #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES g_gc_card_bundle_table = translate_card_bundle_table(card_table_card_bundle_table(ct), saved_g_lowest_address); #endif } if (!write_barrier_updated) { seg_mapping_table = new_seg_mapping_table; GCToOSInterface::FlushProcessWriteBuffers(); g_gc_lowest_address = saved_g_lowest_address; g_gc_highest_address = saved_g_highest_address; // This passes a bool telling whether we need to switch to the post // grow version of the write barrier. This test tells us if the new // segment was allocated at a lower address than the old, requiring // that we start doing an upper bounds check in the write barrier. // This will also suspend the runtime if the write barrier type needs // to be changed, so we are doing this after all global state has // been updated. See the comment above suspend_EE() above for more // info. stomp_write_barrier_resize(GCToEEInterface::IsGCThread(), la != saved_g_lowest_address); } return 0; fail: if (mem) { assert(g_gc_card_table == saved_g_card_table); #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES assert(g_gc_card_bundle_table == saved_g_card_bundle_table); #endif if (!GCToOSInterface::VirtualRelease (mem, alloc_size)) { dprintf (GC_TABLE_LOG, ("GCToOSInterface::VirtualRelease failed")); assert (!"release failed"); } } return -1; } else { #ifdef BACKGROUND_GC if (hp->is_bgc_in_progress()) { dprintf (GC_TABLE_LOG, ("in range new seg %Ix, mark_array is %Ix", new_seg, hp->mark_array)); if (!commit_mark_array_new_seg (hp, new_seg)) { dprintf (GC_TABLE_LOG, ("failed to commit mark array for the new seg in range")); set_fgm_result (fgm_commit_table, logging_ma_commit_size, uoh_p); return -1; } } #endif //BACKGROUND_GC } return 0; } //copy all of the arrays managed by the card table for a page aligned range void gc_heap::copy_brick_card_range (uint8_t* la, uint32_t* old_card_table, short* old_brick_table, uint8_t* start, uint8_t* end) { ptrdiff_t brick_offset = brick_of (start) - brick_of (la); dprintf (2, ("copying tables for range [%Ix %Ix[", (size_t)start, (size_t)end)); // copy brick table short* brick_start = &brick_table [brick_of (start)]; if (old_brick_table) { // segments are always on page boundaries memcpy (brick_start, &old_brick_table[brick_offset], size_brick_of (start, end)); } uint32_t* old_ct = &old_card_table[card_word (card_of (la))]; #ifdef BACKGROUND_GC if (gc_heap::background_running_p()) { uint32_t* old_mark_array = card_table_mark_array (old_ct); // We don't need to go through all the card tables here because // we only need to copy from the GC version of the mark array - when we // mark (even in allocate_uoh_object) we always use that mark array. 
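// (Note: the mark-bit copy below is clamped to the intersection of [start, end) with
// [background_saved_lowest_address, background_saved_highest_address), since only that
// window can hold background mark bits worth preserving.)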
if ((card_table_highest_address (old_ct) >= start) && (card_table_lowest_address (old_ct) <= end)) { if ((background_saved_highest_address >= start) && (background_saved_lowest_address <= end)) { //copy the mark bits // segments are always on page boundaries uint8_t* m_start = max (background_saved_lowest_address, start); uint8_t* m_end = min (background_saved_highest_address, end); memcpy (&mark_array[mark_word_of (m_start)], &old_mark_array[mark_word_of (m_start) - mark_word_of (la)], size_mark_array_of (m_start, m_end)); } } else { //only large segments can be out of range assert (old_brick_table == 0); } } #endif //BACKGROUND_GC // n way merge with all of the card table ever used in between uint32_t* ct = card_table_next (&card_table[card_word (card_of(lowest_address))]); assert (ct); while (card_table_next (old_ct) != ct) { //copy if old card table contained [start, end[ if ((card_table_highest_address (ct) >= end) && (card_table_lowest_address (ct) <= start)) { // or the card_tables size_t start_word = card_word (card_of (start)); uint32_t* dest = &card_table[start_word]; uint32_t* src = &((translate_card_table (ct))[start_word]); ptrdiff_t count = count_card_of (start, end); for (int x = 0; x < count; x++) { *dest |= *src; #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES if (*src != 0) { card_bundle_set(cardw_card_bundle(start_word+x)); } #endif dest++; src++; } } ct = card_table_next (ct); } } void gc_heap::copy_brick_card_table() { uint32_t* old_card_table = card_table; short* old_brick_table = brick_table; uint8_t* la = lowest_address; #ifdef _DEBUG uint8_t* ha = highest_address; assert (la == card_table_lowest_address (&old_card_table[card_word (card_of (la))])); assert (ha == card_table_highest_address (&old_card_table[card_word (card_of (la))])); #endif //_DEBUG /* todo: Need a global lock for this */ uint32_t* ct = &g_gc_card_table[card_word (gcard_of (g_gc_lowest_address))]; own_card_table (ct); card_table = translate_card_table (ct); /* End of global lock */ highest_address = card_table_highest_address (ct); lowest_address = card_table_lowest_address (ct); brick_table = card_table_brick_table (ct); #ifdef BACKGROUND_GC if (gc_can_use_concurrent) { mark_array = translate_mark_array (card_table_mark_array (ct)); assert (mark_word_of (g_gc_highest_address) == mark_word_of (align_on_mark_word (g_gc_highest_address))); } else mark_array = NULL; #endif //BACKGROUND_GC #ifdef CARD_BUNDLE card_bundle_table = translate_card_bundle_table (card_table_card_bundle_table (ct), g_gc_lowest_address); // Ensure that the word that represents g_gc_lowest_address in the translated table is located at the // start of the untranslated table. assert (&card_bundle_table [card_bundle_word (cardw_card_bundle (card_word (card_of (g_gc_lowest_address))))] == card_table_card_bundle_table (ct)); //set the card table if we are in a heap growth scenario if (card_bundles_enabled()) { card_bundles_set (cardw_card_bundle (card_word (card_of (lowest_address))), cardw_card_bundle (align_cardw_on_bundle (card_word (card_of (highest_address))))); } //check if we need to turn on card_bundles. 
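// (The threshold below scales with the number of heaps under server GC, roughly
// MH_TH_CARD_BUNDLE per heap, so card bundles are only enabled once the reserved
// memory crosses that per-heap threshold.)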
#ifdef MULTIPLE_HEAPS // use INT64 arithmetic here because of possible overflow on 32p uint64_t th = (uint64_t)MH_TH_CARD_BUNDLE*gc_heap::n_heaps; #else // use INT64 arithmetic here because of possible overflow on 32p uint64_t th = (uint64_t)SH_TH_CARD_BUNDLE; #endif //MULTIPLE_HEAPS if (reserved_memory >= th) { enable_card_bundles(); } #endif //CARD_BUNDLE // for each of the segments and heaps, copy the brick table and // or the card table for (int i = get_start_generation_index(); i < total_generation_count; i++) { heap_segment* seg = generation_start_segment (generation_of (i)); while (seg) { if (heap_segment_read_only_p (seg) && !heap_segment_in_range_p (seg)) { //check if it became in range if ((heap_segment_reserved (seg) > lowest_address) && (heap_segment_mem (seg) < highest_address)) { set_ro_segment_in_range (seg); } } else { uint8_t* end = align_on_page (heap_segment_allocated (seg)); copy_brick_card_range (la, old_card_table, (i < uoh_start_generation) ? old_brick_table : NULL, align_lower_page (heap_segment_mem (seg)), end); } seg = heap_segment_next (seg); } } release_card_table (&old_card_table[card_word (card_of(la))]); } #ifdef FEATURE_BASICFREEZE BOOL gc_heap::insert_ro_segment (heap_segment* seg) { #ifdef FEATURE_EVENT_TRACE if (!use_frozen_segments_p) use_frozen_segments_p = true; #endif //FEATURE_EVENT_TRACE enter_spin_lock (&gc_heap::gc_lock); if (!gc_heap::seg_table->ensure_space_for_insert () #ifdef BACKGROUND_GC || (is_bgc_in_progress() && !commit_mark_array_new_seg(__this, seg)) #endif //BACKGROUND_GC ) { leave_spin_lock(&gc_heap::gc_lock); return FALSE; } //insert at the head of the segment list generation* gen2 = generation_of (max_generation); heap_segment* oldhead = generation_start_segment (gen2); heap_segment_next (seg) = oldhead; generation_start_segment (gen2) = seg; #ifdef USE_REGIONS dprintf (REGIONS_LOG, ("setting gen2 start seg to %Ix(%Ix)->%Ix", (size_t)seg, heap_segment_mem (seg), heap_segment_mem (oldhead))); if (generation_tail_ro_region (gen2) == 0) { dprintf (REGIONS_LOG, ("setting gen2 tail ro -> %Ix", heap_segment_mem (seg))); generation_tail_ro_region (gen2) = seg; } #endif //USE_REGIONS seg_table->insert (heap_segment_mem(seg), (size_t)seg); seg_mapping_table_add_ro_segment (seg); if ((heap_segment_reserved (seg) > lowest_address) && (heap_segment_mem (seg) < highest_address)) { set_ro_segment_in_range (seg); } FIRE_EVENT(GCCreateSegment_V1, heap_segment_mem(seg), (size_t)(heap_segment_reserved (seg) - heap_segment_mem(seg)), gc_etw_segment_read_only_heap); leave_spin_lock (&gc_heap::gc_lock); return TRUE; } // No one is calling this function right now. If this is getting called we need // to take care of decommitting the mark array for it - we will need to remember // which portion of the mark array was committed and only decommit that. void gc_heap::remove_ro_segment (heap_segment* seg) { //clear the mark bits so a new segment allocated in its place will have a clear mark bits #ifdef BACKGROUND_GC if (gc_can_use_concurrent) { clear_mark_array (align_lower_mark_word (max (heap_segment_mem (seg), lowest_address)), align_on_card_word (min (heap_segment_allocated (seg), highest_address)), false); // read_only segments need the mark clear } #endif //BACKGROUND_GC enter_spin_lock (&gc_heap::gc_lock); seg_table->remove (heap_segment_mem (seg)); seg_mapping_table_remove_ro_segment (seg); // Locate segment (and previous segment) in the list. 
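// (If seg is still at the head of the gen2 segment list, prev_seg stays NULL and the head
// pointer itself is patched below instead.)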
generation* gen2 = generation_of (max_generation); #ifdef USE_REGIONS if (generation_tail_ro_region (gen2) == seg) { generation_tail_ro_region (gen2) = 0; } #endif //USE_REGIONS heap_segment* curr_seg = generation_start_segment (gen2); heap_segment* prev_seg = NULL; while (curr_seg && curr_seg != seg) { prev_seg = curr_seg; curr_seg = heap_segment_next (curr_seg); } assert (curr_seg == seg); // Patch previous segment (or list head if there is none) to skip the removed segment. if (prev_seg) heap_segment_next (prev_seg) = heap_segment_next (curr_seg); else generation_start_segment (gen2) = heap_segment_next (curr_seg); leave_spin_lock (&gc_heap::gc_lock); } #endif //FEATURE_BASICFREEZE BOOL gc_heap::set_ro_segment_in_range (heap_segment* seg) { seg->flags |= heap_segment_flags_inrange; ro_segments_in_range = TRUE; return TRUE; } uint8_t** make_mark_list (size_t size) { uint8_t** mark_list = new (nothrow) uint8_t* [size]; return mark_list; } #define swap(a,b){uint8_t* t; t = a; a = b; b = t;} void verify_qsort_array (uint8_t* *low, uint8_t* *high) { uint8_t **i = 0; for (i = low+1; i <= high; i++) { if (*i < *(i-1)) { FATAL_GC_ERROR(); } } } #ifndef USE_INTROSORT void qsort1( uint8_t* *low, uint8_t* *high, unsigned int depth) { if (((low + 16) >= high) || (depth > 100)) { //insertion sort uint8_t **i, **j; for (i = low+1; i <= high; i++) { uint8_t* val = *i; for (j=i;j >low && val<*(j-1);j--) { *j=*(j-1); } *j=val; } } else { uint8_t *pivot, **left, **right; //sort low middle and high if (*(low+((high-low)/2)) < *low) swap (*(low+((high-low)/2)), *low); if (*high < *low) swap (*low, *high); if (*high < *(low+((high-low)/2))) swap (*(low+((high-low)/2)), *high); swap (*(low+((high-low)/2)), *(high-1)); pivot = *(high-1); left = low; right = high-1; while (1) { while (*(--right) > pivot); while (*(++left) < pivot); if (left < right) { swap(*left, *right); } else break; } swap (*left, *(high-1)); qsort1(low, left-1, depth+1); qsort1(left+1, high, depth+1); } } #endif //USE_INTROSORT void rqsort1( uint8_t* *low, uint8_t* *high) { if ((low + 16) >= high) { //insertion sort uint8_t **i, **j; for (i = low+1; i <= high; i++) { uint8_t* val = *i; for (j=i;j >low && val>*(j-1);j--) { *j=*(j-1); } *j=val; } } else { uint8_t *pivot, **left, **right; //sort low middle and high if (*(low+((high-low)/2)) > *low) swap (*(low+((high-low)/2)), *low); if (*high > *low) swap (*low, *high); if (*high > *(low+((high-low)/2))) swap (*(low+((high-low)/2)), *high); swap (*(low+((high-low)/2)), *(high-1)); pivot = *(high-1); left = low; right = high-1; while (1) { while (*(--right) < pivot); while (*(++left) > pivot); if (left < right) { swap(*left, *right); } else break; } swap (*left, *(high-1)); rqsort1(low, left-1); rqsort1(left+1, high); } } // vxsort uses introsort as a fallback if the AVX2 instruction set is not supported #if defined(USE_INTROSORT) || defined(USE_VXSORT) class introsort { private: static const int size_threshold = 64; static const int max_depth = 100; inline static void swap_elements(uint8_t** i,uint8_t** j) { uint8_t* t=*i; *i=*j; *j=t; } public: static void sort (uint8_t** begin, uint8_t** end, int ignored) { ignored = 0; introsort_loop (begin, end, max_depth); insertionsort (begin, end); } private: static void introsort_loop (uint8_t** lo, uint8_t** hi, int depth_limit) { while (hi-lo >= size_threshold) { if (depth_limit == 0) { heapsort (lo, hi); return; } uint8_t** p=median_partition (lo, hi); depth_limit=depth_limit-1; introsort_loop (p, hi, depth_limit); hi=p-1; } } static uint8_t** 
median_partition (uint8_t** low, uint8_t** high) { uint8_t *pivot, **left, **right; //sort low middle and high if (*(low+((high-low)/2)) < *low) swap_elements ((low+((high-low)/2)), low); if (*high < *low) swap_elements (low, high); if (*high < *(low+((high-low)/2))) swap_elements ((low+((high-low)/2)), high); swap_elements ((low+((high-low)/2)), (high-1)); pivot = *(high-1); left = low; right = high-1; while (1) { while (*(--right) > pivot); while (*(++left) < pivot); if (left < right) { swap_elements(left, right); } else break; } swap_elements (left, (high-1)); return left; } static void insertionsort (uint8_t** lo, uint8_t** hi) { for (uint8_t** i=lo+1; i <= hi; i++) { uint8_t** j = i; uint8_t* t = *i; while((j > lo) && (t <*(j-1))) { *j = *(j-1); j--; } *j = t; } } static void heapsort (uint8_t** lo, uint8_t** hi) { size_t n = hi - lo + 1; for (size_t i=n / 2; i >= 1; i--) { downheap (i,n,lo); } for (size_t i = n; i > 1; i--) { swap_elements (lo, lo + i - 1); downheap(1, i - 1, lo); } } static void downheap (size_t i, size_t n, uint8_t** lo) { uint8_t* d = *(lo + i - 1); size_t child; while (i <= n / 2) { child = 2*i; if (child < n && *(lo + child - 1)<(*(lo + child))) { child++; } if (!(d<*(lo + child - 1))) { break; } *(lo + i - 1) = *(lo + child - 1); i = child; } *(lo + i - 1) = d; } }; #endif //defined(USE_INTROSORT) || defined(USE_VXSORT) #ifdef USE_VXSORT static void do_vxsort (uint8_t** item_array, ptrdiff_t item_count, uint8_t* range_low, uint8_t* range_high) { // above this threshold, using AVX2 for sorting will likely pay off // despite possible downclocking on some devices const size_t AVX2_THRESHOLD_SIZE = 8 * 1024; // above this threshold, using AVX51F for sorting will likely pay off // despite possible downclocking on current devices const size_t AVX512F_THRESHOLD_SIZE = 128 * 1024; if (item_count <= 1) return; if (IsSupportedInstructionSet (InstructionSet::AVX2) && (item_count > AVX2_THRESHOLD_SIZE)) { dprintf(3, ("Sorting mark lists")); // use AVX512F only if the list is large enough to pay for downclocking impact if (IsSupportedInstructionSet (InstructionSet::AVX512F) && (item_count > AVX512F_THRESHOLD_SIZE)) { do_vxsort_avx512 (item_array, &item_array[item_count - 1], range_low, range_high); } else { do_vxsort_avx2 (item_array, &item_array[item_count - 1], range_low, range_high); } } else { dprintf (3, ("Sorting mark lists")); introsort::sort (item_array, &item_array[item_count - 1], 0); } #ifdef _DEBUG // check the array is sorted for (ptrdiff_t i = 0; i < item_count - 1; i++) { assert (item_array[i] <= item_array[i + 1]); } // check that the ends of the array are indeed in range // together with the above this implies all elements are in range assert ((range_low <= item_array[0]) && (item_array[item_count - 1] <= range_high)); #endif } #endif //USE_VXSORT #ifdef MULTIPLE_HEAPS static size_t target_mark_count_for_heap (size_t total_mark_count, int heap_count, int heap_number) { // compute the average (rounded down) size_t average_mark_count = total_mark_count / heap_count; // compute the remainder size_t remaining_mark_count = total_mark_count - (average_mark_count * heap_count); // compute the target count for this heap - last heap has the remainder if (heap_number == (heap_count - 1)) return (average_mark_count + remaining_mark_count); else return average_mark_count; } NOINLINE uint8_t** gc_heap::equalize_mark_lists (size_t total_mark_list_size) { size_t local_mark_count[MAX_SUPPORTED_CPUS]; size_t total_mark_count = 0; // compute mark count per heap into a local 
array // compute the total for (int i = 0; i < n_heaps; i++) { gc_heap* hp = g_heaps[i]; size_t mark_count = hp->mark_list_index - hp->mark_list; local_mark_count[i] = mark_count; total_mark_count += mark_count; } // this should agree with our input parameter assert(total_mark_count == total_mark_list_size); // compute the target count for this heap size_t this_target_mark_count = target_mark_count_for_heap (total_mark_count, n_heaps, heap_number); // if our heap has sufficient entries, we can exit early if (local_mark_count[heap_number] >= this_target_mark_count) return (mark_list + this_target_mark_count); // In the following, we try to fill the deficit in heap "deficit_heap_index" with // surplus from "surplus_heap_index". // If there is no deficit or surplus (anymore), the indices are advanced. int surplus_heap_index = 0; for (int deficit_heap_index = 0; deficit_heap_index <= heap_number; deficit_heap_index++) { // compute the target count for this heap - last heap has the remainder size_t deficit_target_mark_count = target_mark_count_for_heap (total_mark_count, n_heaps, deficit_heap_index); // if this heap has the target or larger count, skip it if (local_mark_count[deficit_heap_index] >= deficit_target_mark_count) continue; // while this heap is lower than average, fill it up while ((surplus_heap_index < n_heaps) && (local_mark_count[deficit_heap_index] < deficit_target_mark_count)) { size_t deficit = deficit_target_mark_count - local_mark_count[deficit_heap_index]; size_t surplus_target_mark_count = target_mark_count_for_heap(total_mark_count, n_heaps, surplus_heap_index); if (local_mark_count[surplus_heap_index] > surplus_target_mark_count) { size_t surplus = local_mark_count[surplus_heap_index] - surplus_target_mark_count; size_t amount_to_transfer = min(deficit, surplus); local_mark_count[surplus_heap_index] -= amount_to_transfer; if (deficit_heap_index == heap_number) { // copy amount_to_transfer mark list items memcpy(&g_heaps[deficit_heap_index]->mark_list[local_mark_count[deficit_heap_index]], &g_heaps[surplus_heap_index]->mark_list[local_mark_count[surplus_heap_index]], (amount_to_transfer*sizeof(mark_list[0]))); } local_mark_count[deficit_heap_index] += amount_to_transfer; } else { surplus_heap_index++; } } } return (mark_list + local_mark_count[heap_number]); } NOINLINE size_t gc_heap::sort_mark_list() { if ((settings.condemned_generation >= max_generation) #ifdef USE_REGIONS || (g_mark_list_piece == nullptr) #endif //USE_REGIONS ) { // fake a mark list overflow so merge_mark_lists knows to quit early mark_list_index = mark_list_end + 1; return 0; } // if this heap had a mark list overflow, we don't do anything if (mark_list_index > mark_list_end) { dprintf (2, ("h%d sort_mark_list overflow", heap_number)); mark_list_overflow = true; return 0; } // if any other heap had a mark list overflow, we fake one too, // so we don't use an incomplete mark list by mistake for (int i = 0; i < n_heaps; i++) { if (g_heaps[i]->mark_list_index > g_heaps[i]->mark_list_end) { mark_list_index = mark_list_end + 1; dprintf (2, ("h%d sort_mark_list: detected overflow on heap %d", heap_number, i)); return 0; } } // compute total mark list size and total ephemeral size size_t total_mark_list_size = 0; size_t total_ephemeral_size = 0; uint8_t* low = (uint8_t*)~0; uint8_t* high = 0; for (int i = 0; i < n_heaps; i++) { gc_heap* hp = g_heaps[i]; total_mark_list_size += (hp->mark_list_index - hp->mark_list); #ifdef USE_REGIONS // iterate through the ephemeral regions to get a tighter bound for (int 
gen_num = settings.condemned_generation; gen_num >= 0; gen_num--) { generation* gen = hp->generation_of (gen_num); for (heap_segment* seg = generation_start_segment (gen); seg != nullptr; seg = heap_segment_next (seg)) { size_t ephemeral_size = heap_segment_allocated (seg) - heap_segment_mem (seg); total_ephemeral_size += ephemeral_size; low = min (low, heap_segment_mem (seg)); high = max (high, heap_segment_allocated (seg)); } } #else //USE_REGIONS size_t ephemeral_size = heap_segment_allocated (hp->ephemeral_heap_segment) - hp->gc_low; total_ephemeral_size += ephemeral_size; low = min (low, hp->gc_low); high = max (high, heap_segment_allocated (hp->ephemeral_heap_segment)); #endif //USE_REGIONS } // give up if the mark list size is unreasonably large if (total_mark_list_size > (total_ephemeral_size / 256)) { mark_list_index = mark_list_end + 1; // let's not count this as a mark list overflow dprintf (2, ("h%d total mark list %Id is too large > (%Id / 256), don't use", heap_number, total_mark_list_size, total_ephemeral_size)); mark_list_overflow = false; return 0; } uint8_t **local_mark_list_index = equalize_mark_lists (total_mark_list_size); #ifdef USE_VXSORT ptrdiff_t item_count = local_mark_list_index - mark_list; //#define WRITE_SORT_DATA #if defined(_DEBUG) || defined(WRITE_SORT_DATA) // in debug, make a copy of the mark list // for checking and debugging purposes uint8_t** mark_list_copy = &g_mark_list_copy[heap_number * mark_list_size]; uint8_t** mark_list_copy_index = &mark_list_copy[item_count]; for (ptrdiff_t i = 0; i < item_count; i++) { uint8_t* item = mark_list[i]; assert ((low <= item) && (item < high)); mark_list_copy[i] = item; } #endif // _DEBUG || WRITE_SORT_DATA do_vxsort (mark_list, item_count, low, high); #ifdef WRITE_SORT_DATA char file_name[256]; sprintf_s (file_name, ARRAY_SIZE(file_name), "sort_data_gc%d_heap%d", settings.gc_index, heap_number); FILE* f; errno_t err = fopen_s (&f, file_name, "wb"); if (err == 0) { size_t magic = 'SDAT'; if (fwrite (&magic, sizeof(magic), 1, f) != 1) dprintf (3, ("fwrite failed\n")); if (fwrite (&elapsed_cycles, sizeof(elapsed_cycles), 1, f) != 1) dprintf (3, ("fwrite failed\n")); if (fwrite (&low, sizeof(low), 1, f) != 1) dprintf (3, ("fwrite failed\n")); if (fwrite (&item_count, sizeof(item_count), 1, f) != 1) dprintf (3, ("fwrite failed\n")); if (fwrite (mark_list_copy, sizeof(mark_list_copy[0]), item_count, f) != item_count) dprintf (3, ("fwrite failed\n")); if (fwrite (&magic, sizeof(magic), 1, f) != 1) dprintf (3, ("fwrite failed\n")); if (fclose (f) != 0) dprintf (3, ("fclose failed\n")); } #endif #ifdef _DEBUG // in debug, sort the copy as well using the proven sort, so we can check we got the right result if (mark_list_copy_index > mark_list_copy) { introsort::sort (mark_list_copy, mark_list_copy_index - 1, 0); } for (ptrdiff_t i = 0; i < item_count; i++) { uint8_t* item = mark_list[i]; assert (mark_list_copy[i] == item); } #endif //_DEBUG #else //USE_VXSORT dprintf (3, ("Sorting mark lists")); if (local_mark_list_index > mark_list) { introsort::sort (mark_list, local_mark_list_index - 1, 0); } #endif //USE_VXSORT uint8_t** x = mark_list; #ifdef USE_REGIONS // first set the pieces for all regions to empty assert (g_mark_list_piece_size >= region_count); for (size_t region_index = 0; region_index < region_count; region_index++) { mark_list_piece_start[region_index] = NULL; mark_list_piece_end[region_index] = NULL; } // predicate means: x is still within the mark list, and within the bounds of this region #define 
predicate(x) (((x) < local_mark_list_index) && (*(x) < region_limit)) while (x < local_mark_list_index) { heap_segment* region = get_region_info_for_address (*x); // sanity check - the object on the mark list should be within the region assert ((heap_segment_mem (region) <= *x) && (*x < heap_segment_allocated (region))); size_t region_index = get_basic_region_index_for_address (heap_segment_mem (region)); uint8_t* region_limit = heap_segment_allocated (region); uint8_t*** mark_list_piece_start_ptr = &mark_list_piece_start[region_index]; uint8_t*** mark_list_piece_end_ptr = &mark_list_piece_end[region_index]; #else // USE_REGIONS // predicate means: x is still within the mark list, and within the bounds of this heap #define predicate(x) (((x) < local_mark_list_index) && (*(x) < heap->ephemeral_high)) // first set the pieces for all heaps to empty int heap_num; for (heap_num = 0; heap_num < n_heaps; heap_num++) { mark_list_piece_start[heap_num] = NULL; mark_list_piece_end[heap_num] = NULL; } heap_num = -1; while (x < local_mark_list_index) { gc_heap* heap; // find the heap x points into - searching cyclically from the last heap, // because in many cases the right heap is the next one or comes soon after #ifdef _DEBUG int last_heap_num = heap_num; #endif //_DEBUG do { heap_num++; if (heap_num >= n_heaps) heap_num = 0; assert(heap_num != last_heap_num); // we should always find the heap - infinite loop if not! heap = g_heaps[heap_num]; } while (!(*x >= heap->ephemeral_low && *x < heap->ephemeral_high)); uint8_t*** mark_list_piece_start_ptr = &mark_list_piece_start[heap_num]; uint8_t*** mark_list_piece_end_ptr = &mark_list_piece_end[heap_num]; #endif // USE_REGIONS // x is the start of the mark list piece for this heap/region *mark_list_piece_start_ptr = x; // to find the end of the mark list piece for this heap/region, find the first x // that has !predicate(x), i.e. that is either not in this heap, or beyond the end of the list if (predicate(x)) { // let's see if we get lucky and the whole rest belongs to this piece if (predicate(local_mark_list_index -1)) { x = local_mark_list_index; *mark_list_piece_end_ptr = x; break; } // we play a variant of binary search to find the point sooner. // the first loop advances by increasing steps until the predicate turns false. // then we retreat the last step, and the second loop advances by decreasing steps, keeping the predicate true. 
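// Illustrative trace (made-up counts): if exactly 11 entries past x still satisfy the predicate
// (offsets 0..10 hold, offset 11 fails), the first loop probes offsets 2, 6 and 14 and stops;
// undoing the last step leaves x at offset 6 with inc == 8. The second loop then probes offset
// 10 (taken), 12 (rejected) and 11 (rejected), ending with x at offset 10 and inc == 1, so the
// final 'x += 1' lands on offset 11, the first entry outside this piece.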
unsigned inc = 1; do { inc *= 2; uint8_t** temp_x = x; x += inc; if (temp_x > x) { break; } } while (predicate(x)); // we know that only the last step was wrong, so we undo it x -= inc; do { // loop invariant - predicate holds at x, but not x + inc assert (predicate(x) && !(((x + inc) > x) && predicate(x + inc))); inc /= 2; if (((x + inc) > x) && predicate(x + inc)) { x += inc; } } while (inc > 1); // the termination condition and the loop invariant together imply this: assert(predicate(x) && !predicate(x + inc) && (inc == 1)); // so the spot we're looking for is one further x += 1; } *mark_list_piece_end_ptr = x; } #undef predicate return total_mark_list_size; } void gc_heap::append_to_mark_list (uint8_t **start, uint8_t **end) { size_t slots_needed = end - start; size_t slots_available = mark_list_end + 1 - mark_list_index; size_t slots_to_copy = min(slots_needed, slots_available); memcpy(mark_list_index, start, slots_to_copy*sizeof(*start)); mark_list_index += slots_to_copy; dprintf (3, ("h%d: appended %Id slots to mark_list\n", heap_number, slots_to_copy)); } #ifdef _DEBUG #if !defined(_MSC_VER) #if !defined(__cdecl) #if defined(__i386__) #define __cdecl __attribute__((cdecl)) #else #define __cdecl #endif #endif #endif static int __cdecl cmp_mark_list_item (const void* vkey, const void* vdatum) { uint8_t** key = (uint8_t**)vkey; uint8_t** datum = (uint8_t**)vdatum; if (*key < *datum) return -1; else if (*key > *datum) return 1; else return 0; } #endif // _DEBUG #ifdef USE_REGIONS uint8_t** gc_heap::get_region_mark_list (uint8_t* start, uint8_t* end, uint8_t*** mark_list_end_ptr) { size_t region_number = get_basic_region_index_for_address (start); size_t source_number = region_number; #else //USE_REGIONS void gc_heap::merge_mark_lists (size_t total_mark_list_size) { // in case of mark list overflow, don't bother if (total_mark_list_size == 0) { return; } #ifdef _DEBUG // if we had more than the average number of mark list items, // make sure these got copied to another heap, i.e. 
didn't get lost size_t this_mark_list_size = target_mark_count_for_heap (total_mark_list_size, n_heaps, heap_number); for (uint8_t** p = mark_list + this_mark_list_size; p < mark_list_index; p++) { uint8_t* item = *p; uint8_t** found_slot = nullptr; for (int i = 0; i < n_heaps; i++) { uint8_t** heap_mark_list = &g_mark_list[i * mark_list_size]; size_t heap_mark_list_size = target_mark_count_for_heap (total_mark_list_size, n_heaps, i); found_slot = (uint8_t**)bsearch (&item, heap_mark_list, heap_mark_list_size, sizeof(item), cmp_mark_list_item); if (found_slot != nullptr) break; } assert ((found_slot != nullptr) && (*found_slot == item)); } #endif dprintf(3, ("merge_mark_lists: heap_number = %d starts out with %Id entries", heap_number, (mark_list_index - mark_list))); int source_number = heap_number; #endif //USE_REGIONS uint8_t** source[MAX_SUPPORTED_CPUS]; uint8_t** source_end[MAX_SUPPORTED_CPUS]; int source_heap[MAX_SUPPORTED_CPUS]; int source_count = 0; for (int i = 0; i < n_heaps; i++) { gc_heap* heap = g_heaps[i]; if (heap->mark_list_piece_start[source_number] < heap->mark_list_piece_end[source_number]) { source[source_count] = heap->mark_list_piece_start[source_number]; source_end[source_count] = heap->mark_list_piece_end[source_number]; source_heap[source_count] = i; if (source_count < MAX_SUPPORTED_CPUS) source_count++; } } dprintf(3, ("source_number = %d has %d sources\n", source_number, source_count)); #if defined(_DEBUG) || defined(TRACE_GC) for (int j = 0; j < source_count; j++) { dprintf(3, ("source_number = %d ", source_number)); dprintf(3, (" source from heap %d = %Ix .. %Ix (%Id entries)", (size_t)(source_heap[j]), (size_t)(source[j][0]), (size_t)(source_end[j][-1]), (size_t)(source_end[j] - source[j]))); // the sources should all be sorted for (uint8_t **x = source[j]; x < source_end[j] - 1; x++) { if (x[0] > x[1]) { dprintf(3, ("oops, mark_list from source %d for heap %d isn't sorted\n", j, source_number)); assert (0); } } } #endif //_DEBUG || TRACE_GC mark_list = &g_mark_list_copy [heap_number*mark_list_size]; mark_list_index = mark_list; mark_list_end = &mark_list [mark_list_size-1]; int piece_count = 0; if (source_count == 0) { ; // nothing to do } else if (source_count == 1) { mark_list = source[0]; mark_list_index = source_end[0]; mark_list_end = mark_list_index; piece_count++; } else { while (source_count > 1) { // find the lowest and second lowest value in the sources we're merging from int lowest_source = 0; uint8_t *lowest = *source[0]; uint8_t *second_lowest = *source[1]; for (int i = 1; i < source_count; i++) { if (lowest > *source[i]) { second_lowest = lowest; lowest = *source[i]; lowest_source = i; } else if (second_lowest > *source[i]) { second_lowest = *source[i]; } } // find the point in the lowest source where it either runs out or is not <= second_lowest anymore // let's first try to get lucky and see if the whole source is <= second_lowest -- this is actually quite common uint8_t **x; if (source_end[lowest_source][-1] <= second_lowest) x = source_end[lowest_source]; else { // use linear search to find the end -- could also use binary search as in sort_mark_list, // but saw no improvement doing that for (x = source[lowest_source]; x < source_end[lowest_source] && *x <= second_lowest; x++) ; } // blast this piece to the mark list append_to_mark_list(source[lowest_source], x); piece_count++; source[lowest_source] = x; // check whether this source is now exhausted if (x >= source_end[lowest_source]) { // if it's not the source with the highest index, copy 
the source with the highest index // over it so the non-empty sources are always at the beginning if (lowest_source < source_count-1) { source[lowest_source] = source[source_count-1]; source_end[lowest_source] = source_end[source_count-1]; } source_count--; } } // we're left with just one source that we copy append_to_mark_list(source[0], source_end[0]); piece_count++; } #if defined(_DEBUG) || defined(TRACE_GC) // the final mark list must be sorted for (uint8_t **x = mark_list; x < mark_list_index - 1; x++) { if (x[0] > x[1]) { dprintf(3, ("oops, mark_list for heap %d isn't sorted at the end of merge_mark_lists", heap_number)); assert (0); } } #endif //_DEBUG || TRACE_GC #ifdef USE_REGIONS *mark_list_end_ptr = mark_list_index; return mark_list; #endif // USE_REGIONS } #else #ifdef USE_REGIONS // a variant of binary search that doesn't look for an exact match, // but finds the first element >= e static uint8_t** binary_search (uint8_t** left, uint8_t** right, uint8_t* e) { if (left == right) return left; assert (left < right); uint8_t** a = left; size_t l = 0; size_t r = (size_t)(right - left); while ((r - l) >= 2) { size_t m = l + (r - l) / 2; // loop condition says that r - l is at least 2 // so l, m, r are all different assert ((l < m) && (m < r)); if (a[m] < e) { l = m; } else { r = m; } } if (a[l] < e) return a + l + 1; else return a + l; } uint8_t** gc_heap::get_region_mark_list (uint8_t* start, uint8_t* end, uint8_t*** mark_list_end_ptr) { // do a binary search over the sorted marked list to find start and end of the // mark list for this region *mark_list_end_ptr = binary_search (mark_list, mark_list_index, end); return binary_search (mark_list, *mark_list_end_ptr, start); } #endif //USE_REGIONS #endif //MULTIPLE_HEAPS void gc_heap::grow_mark_list () { // with vectorized sorting, we can use bigger mark lists #ifdef USE_VXSORT #ifdef MULTIPLE_HEAPS const size_t MAX_MARK_LIST_SIZE = IsSupportedInstructionSet (InstructionSet::AVX2) ? (1000 * 1024) : (200 * 1024); #else //MULTIPLE_HEAPS const size_t MAX_MARK_LIST_SIZE = IsSupportedInstructionSet (InstructionSet::AVX2) ? (32 * 1024) : (16 * 1024); #endif //MULTIPLE_HEAPS #else //USE_VXSORT #ifdef MULTIPLE_HEAPS const size_t MAX_MARK_LIST_SIZE = 200 * 1024; #else //MULTIPLE_HEAPS const size_t MAX_MARK_LIST_SIZE = 16 * 1024; #endif //MULTIPLE_HEAPS #endif //USE_VXSORT size_t new_mark_list_size = min (mark_list_size * 2, MAX_MARK_LIST_SIZE); if (new_mark_list_size == mark_list_size) return; #ifdef MULTIPLE_HEAPS uint8_t** new_mark_list = make_mark_list (new_mark_list_size * n_heaps); uint8_t** new_mark_list_copy = make_mark_list (new_mark_list_size * n_heaps); if ((new_mark_list != nullptr) && (new_mark_list_copy != nullptr)) { delete[] g_mark_list; g_mark_list = new_mark_list; delete[] g_mark_list_copy; g_mark_list_copy = new_mark_list_copy; mark_list_size = new_mark_list_size; } else { delete[] new_mark_list; delete[] new_mark_list_copy; } #else //MULTIPLE_HEAPS uint8_t** new_mark_list = make_mark_list (new_mark_list_size); if (new_mark_list != nullptr) { delete[] mark_list; g_mark_list = new_mark_list; mark_list_size = new_mark_list_size; } #endif //MULTIPLE_HEAPS } class seg_free_spaces { struct seg_free_space { BOOL is_plug; void* start; }; struct free_space_bucket { seg_free_space* free_space; ptrdiff_t count_add; // Assigned when we first construct the array. ptrdiff_t count_fit; // How many items left when we are fitting plugs. 
}; void move_bucket (int old_power2, int new_power2) { // PREFAST warning 22015: old_power2 could be negative assert (old_power2 >= 0); assert (old_power2 >= new_power2); if (old_power2 == new_power2) { return; } seg_free_space* src_index = free_space_buckets[old_power2].free_space; for (int i = old_power2; i > new_power2; i--) { seg_free_space** dest = &(free_space_buckets[i].free_space); (*dest)++; seg_free_space* dest_index = free_space_buckets[i - 1].free_space; if (i > (new_power2 + 1)) { seg_free_space temp = *src_index; *src_index = *dest_index; *dest_index = temp; } src_index = dest_index; } free_space_buckets[old_power2].count_fit--; free_space_buckets[new_power2].count_fit++; } #ifdef _DEBUG void dump_free_space (seg_free_space* item) { uint8_t* addr = 0; size_t len = 0; if (item->is_plug) { mark* m = (mark*)(item->start); len = pinned_len (m); addr = pinned_plug (m) - len; } else { heap_segment* seg = (heap_segment*)(item->start); addr = heap_segment_plan_allocated (seg); len = heap_segment_committed (seg) - addr; } dprintf (SEG_REUSE_LOG_1, ("[%d]0x%Ix %Id", heap_num, addr, len)); } void dump() { seg_free_space* item = NULL; int i = 0; dprintf (SEG_REUSE_LOG_1, ("[%d]----------------------------------\nnow the free spaces look like:", heap_num)); for (i = 0; i < (free_space_bucket_count - 1); i++) { dprintf (SEG_REUSE_LOG_1, ("[%d]Free spaces for 2^%d bucket:", heap_num, (base_power2 + i))); dprintf (SEG_REUSE_LOG_1, ("[%d]%s %s", heap_num, "start", "len")); item = free_space_buckets[i].free_space; while (item < free_space_buckets[i + 1].free_space) { dump_free_space (item); item++; } dprintf (SEG_REUSE_LOG_1, ("[%d]----------------------------------", heap_num)); } dprintf (SEG_REUSE_LOG_1, ("[%d]Free spaces for 2^%d bucket:", heap_num, (base_power2 + i))); dprintf (SEG_REUSE_LOG_1, ("[%d]%s %s", heap_num, "start", "len")); item = free_space_buckets[i].free_space; while (item <= &seg_free_space_array[free_space_item_count - 1]) { dump_free_space (item); item++; } dprintf (SEG_REUSE_LOG_1, ("[%d]----------------------------------", heap_num)); } #endif //_DEBUG free_space_bucket* free_space_buckets; seg_free_space* seg_free_space_array; ptrdiff_t free_space_bucket_count; ptrdiff_t free_space_item_count; int base_power2; int heap_num; #ifdef _DEBUG BOOL has_end_of_seg; #endif //_DEBUG public: seg_free_spaces (int h_number) { heap_num = h_number; } BOOL alloc () { size_t total_prealloc_size = MAX_NUM_BUCKETS * sizeof (free_space_bucket) + MAX_NUM_FREE_SPACES * sizeof (seg_free_space); free_space_buckets = (free_space_bucket*) new (nothrow) uint8_t[total_prealloc_size]; return (!!free_space_buckets); } // We take the ordered free space array we got from the 1st pass, // and feed the portion that we decided to use to this method, ie, // the largest item_count free spaces. 
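// Layout sketch (illustrative): bucket i holds the free spaces whose size has its highest set
// bit equal to (base_power2 + i). All buckets share the single seg_free_space_array allocation,
// with bucket i owning a contiguous slice that starts at free_space_buckets[i].free_space;
// add() below fills each slice from the back, using count_add as a countdown of remaining slots.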
void add_buckets (int base, size_t* ordered_free_spaces, int bucket_count, size_t item_count) { assert (free_space_buckets); assert (item_count <= (size_t)MAX_PTR); free_space_bucket_count = bucket_count; free_space_item_count = item_count; base_power2 = base; #ifdef _DEBUG has_end_of_seg = FALSE; #endif //_DEBUG ptrdiff_t total_item_count = 0; ptrdiff_t i = 0; seg_free_space_array = (seg_free_space*)(free_space_buckets + free_space_bucket_count); for (i = 0; i < (ptrdiff_t)item_count; i++) { seg_free_space_array[i].start = 0; seg_free_space_array[i].is_plug = FALSE; } for (i = 0; i < bucket_count; i++) { free_space_buckets[i].count_add = ordered_free_spaces[i]; free_space_buckets[i].count_fit = ordered_free_spaces[i]; free_space_buckets[i].free_space = &seg_free_space_array[total_item_count]; total_item_count += free_space_buckets[i].count_add; } assert (total_item_count == (ptrdiff_t)item_count); } // If we are adding a free space before a plug we pass the // mark stack position so we can update the length; we could // also be adding the free space after the last plug in which // case start is the segment which we'll need to update the // heap_segment_plan_allocated. void add (void* start, BOOL plug_p, BOOL first_p) { size_t size = (plug_p ? pinned_len ((mark*)start) : (heap_segment_committed ((heap_segment*)start) - heap_segment_plan_allocated ((heap_segment*)start))); if (plug_p) { dprintf (SEG_REUSE_LOG_1, ("[%d]Adding a free space before plug: %Id", heap_num, size)); } else { dprintf (SEG_REUSE_LOG_1, ("[%d]Adding a free space at end of seg: %Id", heap_num, size)); #ifdef _DEBUG has_end_of_seg = TRUE; #endif //_DEBUG } if (first_p) { size_t eph_gen_starts = gc_heap::eph_gen_starts_size; size -= eph_gen_starts; if (plug_p) { mark* m = (mark*)(start); pinned_len (m) -= eph_gen_starts; } else { heap_segment* seg = (heap_segment*)start; heap_segment_plan_allocated (seg) += eph_gen_starts; } } int bucket_power2 = index_of_highest_set_bit (size); if (bucket_power2 < base_power2) { return; } free_space_bucket* bucket = &free_space_buckets[bucket_power2 - base_power2]; seg_free_space* bucket_free_space = bucket->free_space; assert (plug_p || (!plug_p && bucket->count_add)); if (bucket->count_add == 0) { dprintf (SEG_REUSE_LOG_1, ("[%d]Already have enough of 2^%d", heap_num, bucket_power2)); return; } ptrdiff_t index = bucket->count_add - 1; dprintf (SEG_REUSE_LOG_1, ("[%d]Building free spaces: adding %Ix; len: %Id (2^%d)", heap_num, (plug_p ? (pinned_plug ((mark*)start) - pinned_len ((mark*)start)) : heap_segment_plan_allocated ((heap_segment*)start)), size, bucket_power2)); if (plug_p) { bucket_free_space[index].is_plug = TRUE; } bucket_free_space[index].start = start; bucket->count_add--; } #ifdef _DEBUG // Do a consistency check after all free spaces are added. 
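// (Illustratively, this verifies that every slot was filled, that the number of end-of-segment
// entries matches whether one was added, and that each bucket's count_add was driven to zero.)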
void check() { ptrdiff_t i = 0; int end_of_seg_count = 0; for (i = 0; i < free_space_item_count; i++) { assert (seg_free_space_array[i].start); if (!(seg_free_space_array[i].is_plug)) { end_of_seg_count++; } } if (has_end_of_seg) { assert (end_of_seg_count == 1); } else { assert (end_of_seg_count == 0); } for (i = 0; i < free_space_bucket_count; i++) { assert (free_space_buckets[i].count_add == 0); } } #endif //_DEBUG uint8_t* fit (uint8_t* old_loc, size_t plug_size REQD_ALIGN_AND_OFFSET_DCL) { if (old_loc) { #ifdef SHORT_PLUGS assert (!is_plug_padded (old_loc)); #endif //SHORT_PLUGS assert (!node_realigned (old_loc)); } size_t saved_plug_size = plug_size; #ifdef FEATURE_STRUCTALIGN // BARTOKTODO (4841): this code path is disabled (see can_fit_all_blocks_p) until we take alignment requirements into account _ASSERTE(requiredAlignment == DATA_ALIGNMENT && false); #endif // FEATURE_STRUCTALIGN size_t plug_size_to_fit = plug_size; // best fit is only done for gen1 to gen2 and we do not pad in gen2. // however we must account for requirements of large alignment. // which may result in realignment padding. #ifdef RESPECT_LARGE_ALIGNMENT plug_size_to_fit += switch_alignment_size(FALSE); #endif //RESPECT_LARGE_ALIGNMENT int plug_power2 = index_of_highest_set_bit (round_up_power2 (plug_size_to_fit + Align(min_obj_size))); ptrdiff_t i; uint8_t* new_address = 0; if (plug_power2 < base_power2) { plug_power2 = base_power2; } int chosen_power2 = plug_power2 - base_power2; retry: for (i = chosen_power2; i < free_space_bucket_count; i++) { if (free_space_buckets[i].count_fit != 0) { break; } chosen_power2++; } dprintf (SEG_REUSE_LOG_1, ("[%d]Fitting plug len %Id (2^%d) using 2^%d free space", heap_num, plug_size, plug_power2, (chosen_power2 + base_power2))); assert (i < free_space_bucket_count); seg_free_space* bucket_free_space = free_space_buckets[chosen_power2].free_space; ptrdiff_t free_space_count = free_space_buckets[chosen_power2].count_fit; size_t new_free_space_size = 0; BOOL can_fit = FALSE; size_t pad = 0; for (i = 0; i < free_space_count; i++) { size_t free_space_size = 0; pad = 0; if (bucket_free_space[i].is_plug) { mark* m = (mark*)(bucket_free_space[i].start); uint8_t* plug_free_space_start = pinned_plug (m) - pinned_len (m); if (!((old_loc == 0) || same_large_alignment_p (old_loc, plug_free_space_start))) { pad = switch_alignment_size (FALSE); } plug_size = saved_plug_size + pad; free_space_size = pinned_len (m); new_address = pinned_plug (m) - pinned_len (m); if (free_space_size >= (plug_size + Align (min_obj_size)) || free_space_size == plug_size) { new_free_space_size = free_space_size - plug_size; pinned_len (m) = new_free_space_size; #ifdef SIMPLE_DPRINTF dprintf (SEG_REUSE_LOG_0, ("[%d]FP: 0x%Ix->0x%Ix(%Ix)(%Ix), [0x%Ix (2^%d) -> [0x%Ix (2^%d)", heap_num, old_loc, new_address, (plug_size - pad), pad, pinned_plug (m), index_of_highest_set_bit (free_space_size), (pinned_plug (m) - pinned_len (m)), index_of_highest_set_bit (new_free_space_size))); #endif //SIMPLE_DPRINTF if (pad != 0) { set_node_realigned (old_loc); } can_fit = TRUE; } } else { heap_segment* seg = (heap_segment*)(bucket_free_space[i].start); free_space_size = heap_segment_committed (seg) - heap_segment_plan_allocated (seg); if (!((old_loc == 0) || same_large_alignment_p (old_loc, heap_segment_plan_allocated (seg)))) { pad = switch_alignment_size (FALSE); } plug_size = saved_plug_size + pad; if (free_space_size >= (plug_size + Align (min_obj_size)) || free_space_size == plug_size) { new_address = 
heap_segment_plan_allocated (seg); new_free_space_size = free_space_size - plug_size; heap_segment_plan_allocated (seg) = new_address + plug_size; #ifdef SIMPLE_DPRINTF dprintf (SEG_REUSE_LOG_0, ("[%d]FS: 0x%Ix-> 0x%Ix(%Ix) (2^%d) -> 0x%Ix (2^%d)", heap_num, old_loc, new_address, (plug_size - pad), index_of_highest_set_bit (free_space_size), heap_segment_plan_allocated (seg), index_of_highest_set_bit (new_free_space_size))); #endif //SIMPLE_DPRINTF if (pad != 0) set_node_realigned (old_loc); can_fit = TRUE; } } if (can_fit) { break; } } if (!can_fit) { assert (chosen_power2 == 0); chosen_power2 = 1; goto retry; } new_address += pad; assert ((chosen_power2 && (i == 0)) || ((!chosen_power2) && (i < free_space_count))); int new_bucket_power2 = index_of_highest_set_bit (new_free_space_size); if (new_bucket_power2 < base_power2) { new_bucket_power2 = base_power2; } move_bucket (chosen_power2, new_bucket_power2 - base_power2); //dump(); return new_address; } void cleanup () { if (free_space_buckets) { delete [] free_space_buckets; } if (seg_free_space_array) { delete [] seg_free_space_array; } } }; #define marked(i) header(i)->IsMarked() #define set_marked(i) header(i)->SetMarked() #define clear_marked(i) header(i)->ClearMarked() #define pinned(i) header(i)->IsPinned() #define set_pinned(i) header(i)->SetPinned() #define clear_pinned(i) header(i)->GetHeader()->ClrGCBit(); inline size_t my_get_size (Object* ob) { MethodTable* mT = header(ob)->GetMethodTable(); return (mT->GetBaseSize() + (mT->HasComponentSize() ? ((size_t)((CObjectHeader*)ob)->GetNumComponents() * mT->RawGetComponentSize()) : 0)); } //#define size(i) header(i)->GetSize() #define size(i) my_get_size (header(i)) #define contain_pointers(i) header(i)->ContainsPointers() #ifdef COLLECTIBLE_CLASS #define contain_pointers_or_collectible(i) header(i)->ContainsPointersOrCollectible() #define get_class_object(i) GCToEEInterface::GetLoaderAllocatorObjectForGC((Object *)i) #define is_collectible(i) method_table(i)->Collectible() #else //COLLECTIBLE_CLASS #define contain_pointers_or_collectible(i) header(i)->ContainsPointers() #endif //COLLECTIBLE_CLASS #ifdef BACKGROUND_GC inline void gc_heap::seg_clear_mark_array_bits_soh (heap_segment* seg) { uint8_t* range_beg = 0; uint8_t* range_end = 0; if (bgc_mark_array_range (seg, FALSE, &range_beg, &range_end)) { clear_mark_array (range_beg, align_on_mark_word (range_end), FALSE #ifdef FEATURE_BASICFREEZE , TRUE #endif // FEATURE_BASICFREEZE ); } } void gc_heap::bgc_clear_batch_mark_array_bits (uint8_t* start, uint8_t* end) { if ((start < background_saved_highest_address) && (end > background_saved_lowest_address)) { start = max (start, background_saved_lowest_address); end = min (end, background_saved_highest_address); size_t start_mark_bit = mark_bit_of (start); size_t end_mark_bit = mark_bit_of (end); unsigned int startbit = mark_bit_bit (start_mark_bit); unsigned int endbit = mark_bit_bit (end_mark_bit); size_t startwrd = mark_bit_word (start_mark_bit); size_t endwrd = mark_bit_word (end_mark_bit); dprintf (3, ("Clearing all mark array bits between [%Ix:%Ix-[%Ix:%Ix", (size_t)start, (size_t)start_mark_bit, (size_t)end, (size_t)end_mark_bit)); unsigned int firstwrd = lowbits (~0, startbit); unsigned int lastwrd = highbits (~0, endbit); if (startwrd == endwrd) { if (startbit != endbit) { unsigned int wrd = firstwrd | lastwrd; mark_array[startwrd] &= wrd; } else { assert (start == end); } return; } // clear the first mark word. 
if (startbit) { mark_array[startwrd] &= firstwrd; startwrd++; } for (size_t wrdtmp = startwrd; wrdtmp < endwrd; wrdtmp++) { mark_array[wrdtmp] = 0; } // clear the last mark word. if (endbit) { mark_array[endwrd] &= lastwrd; } } } #endif //BACKGROUND_GC inline BOOL gc_heap::is_mark_set (uint8_t* o) { return marked (o); } #if defined (_MSC_VER) && defined (TARGET_X86) #pragma optimize("y", on) // Small critical routines, don't put in EBP frame #endif //_MSC_VER && TARGET_X86 // return the generation number of an object. // It is assumed that the object is valid. // Note that this will return max_generation for UOH objects int gc_heap::object_gennum (uint8_t* o) { #ifdef USE_REGIONS return get_region_gen_num (o); #else if (in_range_for_segment (o, ephemeral_heap_segment) && (o >= generation_allocation_start (generation_of (max_generation - 1)))) { // in an ephemeral generation. for ( int i = 0; i < max_generation-1; i++) { if ((o >= generation_allocation_start (generation_of (i)))) return i; } return max_generation-1; } else { return max_generation; } #endif //USE_REGIONS } int gc_heap::object_gennum_plan (uint8_t* o) { #ifdef USE_REGIONS return get_region_plan_gen_num (o); #else if (in_range_for_segment (o, ephemeral_heap_segment)) { for (int i = 0; i < ephemeral_generation_count; i++) { uint8_t* plan_start = generation_plan_allocation_start (generation_of (i)); if (plan_start && (o >= plan_start)) { return i; } } } return max_generation; #endif //USE_REGIONS } #if defined(_MSC_VER) && defined(TARGET_X86) #pragma optimize("", on) // Go back to command line default optimizations #endif //_MSC_VER && TARGET_X86 #ifdef USE_REGIONS void get_initial_region(int gen, int hn, uint8_t** region_start, uint8_t** region_end) { *region_start = initial_regions[hn][gen][0]; *region_end = initial_regions[hn][gen][1]; } bool gc_heap::initial_make_soh_regions (gc_heap* hp) { uint8_t* region_start; uint8_t* region_end; uint32_t hn = 0; #ifdef MULTIPLE_HEAPS hn = hp->heap_number; #endif //MULTIPLE_HEAPS for (int i = max_generation; i >= 0; i--) { get_initial_region(i, hn, &region_start, &region_end); size_t region_size = region_end - region_start; heap_segment* current_region = make_heap_segment (region_start, region_size, hp, i); if (current_region == nullptr) { return false; } uint8_t* gen_start = heap_segment_mem (current_region); make_generation (i, current_region, gen_start); if (i == 0) { ephemeral_heap_segment = current_region; alloc_allocated = heap_segment_allocated (current_region); } } for (int i = max_generation; i >= 0; i--) { dprintf (REGIONS_LOG, ("h%d gen%d alloc seg is %Ix, start seg is %Ix (%Ix-%Ix)", heap_number, i, generation_allocation_segment (generation_of (i)), generation_start_segment (generation_of (i)), heap_segment_mem (generation_start_segment (generation_of (i))), heap_segment_allocated (generation_start_segment (generation_of (i))))); } return true; } bool gc_heap::initial_make_uoh_regions (int gen, gc_heap* hp) { uint8_t* region_start; uint8_t* region_end; uint32_t hn = 0; #ifdef MULTIPLE_HEAPS hn = hp->heap_number; #endif //MULTIPLE_HEAPS get_initial_region(gen, hn, &region_start, &region_end); size_t region_size = region_end - region_start; heap_segment* uoh_region = make_heap_segment (region_start, region_size, hp, gen); if (uoh_region == nullptr) { return false; } uoh_region->flags |= (gen == loh_generation) ? 
heap_segment_flags_loh : heap_segment_flags_poh; uint8_t* gen_start = heap_segment_mem (uoh_region); make_generation (gen, uoh_region, gen_start); return true; } void gc_heap::clear_region_info (heap_segment* region) { if (!heap_segment_uoh_p (region)) { //cleanup the brick table back to the empty value clear_brick_table (heap_segment_mem (region), heap_segment_reserved (region)); } // we should really clear cards as well!! #ifdef BACKGROUND_GC ::record_changed_seg ((uint8_t*)region, heap_segment_reserved (region), settings.gc_index, current_bgc_state, seg_deleted); if (dt_high_memory_load_p()) { decommit_mark_array_by_seg (region); } #endif //BACKGROUND_GC } // Note that returning a region to free does not decommit. // REGIONS PERF TODO: should decommit if needed. void gc_heap::return_free_region (heap_segment* region) { clear_region_info (region); region_free_list::add_region_descending (region, free_regions); uint8_t* region_start = get_region_start (region); uint8_t* region_end = heap_segment_reserved (region); int num_basic_regions = (int)((region_end - region_start) >> min_segment_size_shr); dprintf (REGIONS_LOG, ("RETURNING region %Ix (%d basic regions) to free", heap_segment_mem (region), num_basic_regions)); for (int i = 0; i < num_basic_regions; i++) { uint8_t* basic_region_start = region_start + ((size_t)i << min_segment_size_shr); heap_segment* basic_region = get_region_info (basic_region_start); heap_segment_allocated (basic_region) = 0; #ifdef MULTIPLE_HEAPS heap_segment_heap (basic_region) = 0; #endif //MULTIPLE_HEAPS // I'm intentionally not resetting gen_num/plan_gen_num which will show us // which gen/plan gen this region was and that's useful for debugging. } } // USE_REGIONS TODO: SOH should be able to get a large region and split it up into basic regions // if needed. // USE_REGIONS TODO: In Server GC we should allow to get a free region from another heap. heap_segment* gc_heap::get_free_region (int gen_number, size_t size) { heap_segment* region = 0; if (gen_number <= max_generation) { assert (size == 0); region = free_regions[basic_free_region].unlink_region_front(); } else { const size_t LARGE_REGION_SIZE = global_region_allocator.get_large_region_alignment(); assert (size >= LARGE_REGION_SIZE); if (size == LARGE_REGION_SIZE) { // get it from the local list of large free regions if possible region = free_regions[large_free_region].unlink_region_front(); } else { // get it from the local list of huge free regions if possible region = free_regions[huge_free_region].unlink_smallest_region (size); if (region == nullptr) { ASSERT_HOLDING_SPIN_LOCK(&gc_lock); // get it from the global list of huge free regions region = global_free_huge_regions.unlink_smallest_region (size); } } } if (region) { uint8_t* region_start = get_region_start (region); uint8_t* region_end = heap_segment_reserved (region); init_heap_segment (region, __this, region_start, (region_end - region_start), gen_number); dprintf (REGIONS_LOG, ("h%d GFR get region %Ix (%Ix-%Ix) for gen%d", heap_number, (size_t)region, region_start, region_end, gen_number)); } else { // TODO: We should keep enough reserve in the free regions so we don't get OOM when // this is called within GC when we sweep. region = allocate_new_region (__this, gen_number, (gen_number > max_generation), size); } if (region) { if (!init_table_for_region (gen_number, region)) { region = 0; } } return region; } // Note that this gets the basic region index for obj. If the obj is in a large region, // this region may not be the start of it. 
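// The lookup below is a single shift: every address inside the same basic region maps to the
// same (address >> min_segment_size_shr) value, which is used directly as the index into
// seg_mapping_table.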
heap_segment* gc_heap::region_of (uint8_t* obj) { size_t index = (size_t)obj >> gc_heap::min_segment_size_shr; seg_mapping* entry = &seg_mapping_table[index]; return (heap_segment*)entry; } heap_segment* gc_heap::get_region_at_index (size_t index) { index += (size_t)g_gc_lowest_address >> gc_heap::min_segment_size_shr; return (heap_segment*)(&seg_mapping_table[index]); } // For debugging purposes to check that a region looks sane and // do some logging. This was useful to sprinkle in various places // where we were threading regions. void gc_heap::check_seg_gen_num (heap_segment* seg) { #ifdef _DEBUG uint8_t* mem = heap_segment_mem (seg); if ((mem < g_gc_lowest_address) || (mem >= g_gc_highest_address)) { GCToOSInterface::DebugBreak(); } int alloc_seg_gen_num = get_region_gen_num (mem); int alloc_seg_plan_gen_num = get_region_plan_gen_num (mem); dprintf (3, ("seg %Ix->%Ix, num %d, %d", (size_t)seg, mem, alloc_seg_gen_num, alloc_seg_plan_gen_num)); #endif //_DEBUG } int gc_heap::get_region_gen_num (heap_segment* region) { return heap_segment_gen_num (region); } int gc_heap::get_region_gen_num (uint8_t* obj) { return heap_segment_gen_num (region_of (obj)); } int gc_heap::get_region_plan_gen_num (uint8_t* obj) { return heap_segment_plan_gen_num (region_of (obj)); } bool gc_heap::is_region_demoted (uint8_t* obj) { return heap_segment_demoted_p (region_of (obj)); } inline void gc_heap::set_region_gen_num (heap_segment* region, int gen_num) { assert (gen_num < (1 << (sizeof (uint8_t) * 8))); assert (gen_num >= 0); heap_segment_gen_num (region) = (uint8_t)gen_num; } inline void gc_heap::set_region_plan_gen_num (heap_segment* region, int plan_gen_num) { int gen_num = heap_segment_gen_num (region); int supposed_plan_gen_num = get_plan_gen_num (gen_num); dprintf (REGIONS_LOG, ("h%d setting plan gen on %Ix->%Ix(was gen%d) to %d(should be: %d) %s", heap_number, (size_t)region, heap_segment_mem (region), gen_num, plan_gen_num, supposed_plan_gen_num, ((plan_gen_num < supposed_plan_gen_num) ? "DEMOTED" : "ND"))); if (plan_gen_num < supposed_plan_gen_num) { if (!settings.demotion) { settings.demotion = TRUE; } get_gc_data_per_heap()->set_mechanism_bit (gc_demotion_bit); region->flags |= heap_segment_flags_demoted; } else { region->flags &= ~heap_segment_flags_demoted; } heap_segment_plan_gen_num (region) = plan_gen_num; } inline void gc_heap::set_region_plan_gen_num_sip (heap_segment* region, int plan_gen_num) { if (!heap_segment_swept_in_plan (region)) { set_region_plan_gen_num (region, plan_gen_num); } } #endif //USE_REGIONS int gc_heap::get_plan_gen_num (int gen_number) { return ((settings.promotion) ? 
min ((gen_number + 1), max_generation) : gen_number); } uint8_t* gc_heap::get_uoh_start_object (heap_segment* region, generation* gen) { #ifdef USE_REGIONS uint8_t* o = heap_segment_mem (region); #else uint8_t* o = generation_allocation_start (gen); assert(((CObjectHeader*)o)->IsFree()); size_t s = Align (size (o), get_alignment_constant (FALSE)); assert (s == AlignQword (min_obj_size)); //Skip the generation gap object o += s; #endif //USE_REGIONS return o; } uint8_t* gc_heap::get_soh_start_object (heap_segment* region, generation* gen) { #ifdef USE_REGIONS uint8_t* o = heap_segment_mem (region); #else uint8_t* o = generation_allocation_start (gen); #endif //USE_REGIONS return o; } size_t gc_heap::get_soh_start_obj_len (uint8_t* start_obj) { #ifdef USE_REGIONS return 0; #else return Align (size (start_obj)); #endif //USE_REGIONS } void gc_heap::clear_gen1_cards() { #if defined(_DEBUG) && !defined(USE_REGIONS) for (int x = 0; x <= max_generation; x++) { assert (generation_allocation_start (generation_of (x))); } #endif //_DEBUG && !USE_REGIONS if (!settings.demotion && settings.promotion) { //clear card for generation 1. generation 0 is empty #ifdef USE_REGIONS heap_segment* region = generation_start_segment (generation_of (1)); while (region) { clear_card_for_addresses (heap_segment_mem (region), heap_segment_allocated (region)); region = heap_segment_next (region); } #else //USE_REGIONS clear_card_for_addresses ( generation_allocation_start (generation_of (1)), generation_allocation_start (generation_of (0))); #endif //USE_REGIONS #ifdef _DEBUG uint8_t* start = get_soh_start_object (ephemeral_heap_segment, youngest_generation); assert (heap_segment_allocated (ephemeral_heap_segment) == (start + get_soh_start_obj_len (start))); #endif //_DEBUG } } heap_segment* gc_heap::make_heap_segment (uint8_t* new_pages, size_t size, gc_heap* hp, int gen_num) { gc_oh_num oh = gen_to_oh (gen_num); size_t initial_commit = SEGMENT_INITIAL_COMMIT; int h_number = #ifdef MULTIPLE_HEAPS hp->heap_number; #else 0; #endif //MULTIPLE_HEAPS if (!virtual_commit (new_pages, initial_commit, oh, h_number)) { return 0; } #ifdef USE_REGIONS dprintf (REGIONS_LOG, ("Making region %Ix->%Ix(%Idmb)", new_pages, (new_pages + size), (size / 1024 / 1024))); heap_segment* new_segment = get_region_info (new_pages); uint8_t* start = new_pages + sizeof (aligned_plug_and_gap); #else heap_segment* new_segment = (heap_segment*)new_pages; uint8_t* start = new_pages + segment_info_size; #endif //USE_REGIONS heap_segment_mem (new_segment) = start; heap_segment_used (new_segment) = start; heap_segment_reserved (new_segment) = new_pages + size; heap_segment_committed (new_segment) = (use_large_pages_p ? 
heap_segment_reserved(new_segment) : (new_pages + initial_commit)); init_heap_segment (new_segment, hp #ifdef USE_REGIONS , new_pages, size, gen_num #endif //USE_REGIONS ); dprintf (2, ("Creating heap segment %Ix", (size_t)new_segment)); return new_segment; } void gc_heap::init_heap_segment (heap_segment* seg, gc_heap* hp #ifdef USE_REGIONS , uint8_t* start, size_t size, int gen_num #endif //USE_REGIONS ) { seg->flags = 0; heap_segment_next (seg) = 0; heap_segment_plan_allocated (seg) = heap_segment_mem (seg); heap_segment_allocated (seg) = heap_segment_mem (seg); heap_segment_saved_allocated (seg) = heap_segment_mem (seg); heap_segment_decommit_target (seg) = heap_segment_reserved (seg); #ifdef BACKGROUND_GC heap_segment_background_allocated (seg) = 0; heap_segment_saved_bg_allocated (seg) = 0; #endif //BACKGROUND_GC #ifdef MULTIPLE_HEAPS heap_segment_heap (seg) = hp; #endif //MULTIPLE_HEAPS #ifdef USE_REGIONS int gen_num_for_region = min (gen_num, max_generation); heap_segment_gen_num (seg) = (uint8_t)gen_num_for_region; heap_segment_plan_gen_num (seg) = gen_num_for_region; heap_segment_swept_in_plan (seg) = false; #endif //USE_REGIONS #ifdef USE_REGIONS int num_basic_regions = (int)(size >> min_segment_size_shr); size_t basic_region_size = (size_t)1 << min_segment_size_shr; dprintf (REGIONS_LOG, ("this region contains %d basic regions", num_basic_regions)); if (num_basic_regions > 1) { for (int i = 1; i < num_basic_regions; i++) { uint8_t* basic_region_start = start + (i * basic_region_size); heap_segment* basic_region = get_region_info (basic_region_start); heap_segment_allocated (basic_region) = (uint8_t*)(ptrdiff_t)-i; dprintf (REGIONS_LOG, ("Initing basic region %Ix->%Ix(%Idmb) alloc to %Ix", basic_region_start, (basic_region_start + basic_region_size), (size_t)(basic_region_size / 1024 / 1024), heap_segment_allocated (basic_region))); heap_segment_gen_num (basic_region) = (uint8_t)gen_num_for_region; heap_segment_plan_gen_num (basic_region) = gen_num_for_region; #ifdef MULTIPLE_HEAPS heap_segment_heap (basic_region) = hp; #endif //MULTIPLE_HEAPS } } #endif //USE_REGIONS } //Releases the segment to the OS. // this is always called on one thread only so calling seg_table->remove is fine. void gc_heap::delete_heap_segment (heap_segment* seg, BOOL consider_hoarding) { if (!heap_segment_uoh_p (seg)) { //cleanup the brick table back to the empty value clear_brick_table (heap_segment_mem (seg), heap_segment_reserved (seg)); } #ifdef USE_REGIONS return_free_region (seg); #else // USE_REGIONS if (consider_hoarding) { assert ((heap_segment_mem (seg) - (uint8_t*)seg) <= ptrdiff_t(2*OS_PAGE_SIZE)); size_t ss = (size_t) (heap_segment_reserved (seg) - (uint8_t*)seg); //Don't keep the big ones. if (ss <= INITIAL_ALLOC) { dprintf (2, ("Hoarding segment %Ix", (size_t)seg)); #ifdef BACKGROUND_GC // We don't need to clear the decommitted flag because when this segment is used // for a new segment the flags will be cleared. 
            if (!heap_segment_decommitted_p (seg))
#endif //BACKGROUND_GC
            {
                decommit_heap_segment (seg);
            }
            seg_mapping_table_remove_segment (seg);
            heap_segment_next (seg) = segment_standby_list;
            segment_standby_list = seg;
            seg = 0;
        }
    }
    if (seg != 0)
    {
        dprintf (2, ("h%d: del seg: [%Ix, %Ix[", heap_number, (size_t)seg,
                     (size_t)(heap_segment_reserved (seg))));
#ifdef BACKGROUND_GC
        ::record_changed_seg ((uint8_t*)seg, heap_segment_reserved (seg),
                              settings.gc_index, current_bgc_state, seg_deleted);
        decommit_mark_array_by_seg (seg);
#endif //BACKGROUND_GC
        seg_mapping_table_remove_segment (seg);
        release_segment (seg);
    }
#endif //USE_REGIONS
}

//resets the pages beyond allocated size so they won't be swapped out and back in
void gc_heap::reset_heap_segment_pages (heap_segment* seg)
{
    size_t page_start = align_on_page ((size_t)heap_segment_allocated (seg));
    size_t size = (size_t)heap_segment_committed (seg) - page_start;
    if (size != 0)
        GCToOSInterface::VirtualReset((void*)page_start, size, false /* unlock */);
}

void gc_heap::decommit_heap_segment_pages (heap_segment* seg, size_t extra_space)
{
    if (use_large_pages_p)
        return;
    uint8_t* page_start = align_on_page (heap_segment_allocated(seg));
    assert (heap_segment_committed (seg) >= page_start);
    size_t size = heap_segment_committed (seg) - page_start;
    extra_space = align_on_page (extra_space);
    if (size >= max ((extra_space + 2*OS_PAGE_SIZE), MIN_DECOMMIT_SIZE))
    {
        page_start += max(extra_space, 32*OS_PAGE_SIZE);
        decommit_heap_segment_pages_worker (seg, page_start);
    }
}

size_t gc_heap::decommit_heap_segment_pages_worker (heap_segment* seg, uint8_t* new_committed)
{
    assert (!use_large_pages_p);
    uint8_t* page_start = align_on_page (new_committed);
    ptrdiff_t size = heap_segment_committed (seg) - page_start;
    if (size > 0)
    {
        bool decommit_succeeded_p = virtual_decommit (page_start, (size_t)size, heap_segment_oh (seg), heap_number);
        if (decommit_succeeded_p)
        {
            dprintf (3, ("Decommitting heap segment [%Ix, %Ix[(%d)",
                (size_t)page_start, (size_t)(page_start + size), size));
            heap_segment_committed (seg) = page_start;
            if (heap_segment_used (seg) > heap_segment_committed (seg))
            {
                heap_segment_used (seg) = heap_segment_committed (seg);
            }
        }
        else
        {
            dprintf (3, ("Decommitting heap segment failed"));
        }
    }
    return size;
}

//decommit all pages except one or 2
void gc_heap::decommit_heap_segment (heap_segment* seg)
{
#ifdef USE_REGIONS
    if (!dt_high_memory_load_p())
    {
        return;
    }
#endif
    uint8_t* page_start = align_on_page (heap_segment_mem (seg));
    dprintf (3, ("Decommitting heap segment %Ix(%Ix)", (size_t)seg, heap_segment_mem (seg)));
#if defined(BACKGROUND_GC) && !defined(USE_REGIONS)
    page_start += OS_PAGE_SIZE;
#endif //BACKGROUND_GC && !USE_REGIONS
    assert (heap_segment_committed (seg) >= page_start);
    size_t size = heap_segment_committed (seg) - page_start;
    bool decommit_succeeded_p = virtual_decommit (page_start, size, heap_segment_oh (seg), heap_number);
    if (decommit_succeeded_p)
    {
        //re-init the segment object
        heap_segment_committed (seg) = page_start;
        if (heap_segment_used (seg) > heap_segment_committed (seg))
        {
            heap_segment_used (seg) = heap_segment_committed (seg);
        }
    }
}

void gc_heap::clear_gen0_bricks()
{
    if (!gen0_bricks_cleared)
    {
        gen0_bricks_cleared = TRUE;
        //initialize brick table for gen 0
#ifdef USE_REGIONS
        heap_segment* gen0_region = generation_start_segment (generation_of (0));
        while (gen0_region)
        {
            uint8_t* clear_start = heap_segment_mem (gen0_region);
#else
        heap_segment* gen0_region = ephemeral_heap_segment;
        uint8_t* clear_start = generation_allocation_start
(generation_of (0)); { #endif //USE_REGIONS for (size_t b = brick_of (clear_start); b < brick_of (align_on_brick (heap_segment_allocated (gen0_region))); b++) { set_brick (b, -1); } #ifdef USE_REGIONS gen0_region = heap_segment_next (gen0_region); #endif //USE_REGIONS } } } void gc_heap::check_gen0_bricks() { //#ifdef _DEBUG if (gen0_bricks_cleared) { #ifdef USE_REGIONS heap_segment* gen0_region = generation_start_segment (generation_of (0)); while (gen0_region) { uint8_t* start = heap_segment_mem (gen0_region); #else heap_segment* gen0_region = ephemeral_heap_segment; uint8_t* start = generation_allocation_start (generation_of (0)); { #endif //USE_REGIONS size_t end_b = brick_of (heap_segment_allocated (gen0_region)); for (size_t b = brick_of (start); b < end_b; b++) { assert (brick_table[b] != 0); if (brick_table[b] == 0) { GCToOSInterface::DebugBreak(); } } #ifdef USE_REGIONS gen0_region = heap_segment_next (gen0_region); #endif //USE_REGIONS } } //#endif //_DEBUG } #ifdef BACKGROUND_GC void gc_heap::rearrange_small_heap_segments() { heap_segment* seg = freeable_soh_segment; while (seg) { heap_segment* next_seg = heap_segment_next (seg); // TODO: we need to consider hoarding here. delete_heap_segment (seg, FALSE); seg = next_seg; } freeable_soh_segment = 0; } #endif //BACKGROUND_GC void gc_heap::rearrange_uoh_segments() { dprintf (2, ("deleting empty large segments")); heap_segment* seg = freeable_uoh_segment; while (seg) { heap_segment* next_seg = heap_segment_next (seg); delete_heap_segment (seg, GCConfig::GetRetainVM()); seg = next_seg; } freeable_uoh_segment = 0; } #ifndef USE_REGIONS void gc_heap::rearrange_heap_segments(BOOL compacting) { heap_segment* seg = generation_start_segment (generation_of (max_generation)); heap_segment* prev_seg = 0; heap_segment* next_seg = 0; while (seg) { next_seg = heap_segment_next (seg); //link ephemeral segment when expanding if ((next_seg == 0) && (seg != ephemeral_heap_segment)) { seg->next = ephemeral_heap_segment; next_seg = heap_segment_next (seg); } //re-used expanded heap segment if ((seg == ephemeral_heap_segment) && next_seg) { heap_segment_next (prev_seg) = next_seg; heap_segment_next (seg) = 0; } else { uint8_t* end_segment = (compacting ? heap_segment_plan_allocated (seg) : heap_segment_allocated (seg)); // check if the segment was reached by allocation if ((end_segment == heap_segment_mem (seg))&& !heap_segment_read_only_p (seg)) { //if not, unthread and delete assert (prev_seg); assert (seg != ephemeral_heap_segment); heap_segment_next (prev_seg) = next_seg; delete_heap_segment (seg, GCConfig::GetRetainVM()); dprintf (2, ("Deleting heap segment %Ix", (size_t)seg)); } else { if (!heap_segment_read_only_p (seg)) { if (compacting) { heap_segment_allocated (seg) = heap_segment_plan_allocated (seg); } // reset the pages between allocated and committed. 
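                    // (the ephemeral segment is explicitly skipped here; only the other
                    // segments get their unused tail pages decommitted)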
if (seg != ephemeral_heap_segment) { decommit_heap_segment_pages (seg, 0); } } prev_seg = seg; } } seg = next_seg; } } #endif //!USE_REGIONS #if defined(USE_REGIONS) // trim down the list of free regions pointed at by free_list down to target_count, moving the extra ones to surplus_list static void remove_surplus_regions (region_free_list* free_list, region_free_list* surplus_list, size_t target_count) { while (free_list->get_num_free_regions() > target_count) { // remove one region from the heap's free list heap_segment* region = free_list->unlink_region_front(); // and put it on the surplus list surplus_list->add_region_front (region); } } // add regions from surplus_list to free_list, trying to reach target_count static int64_t add_regions (region_free_list* free_list, region_free_list* surplus_list, size_t target_count) { int64_t added_count = 0; while (free_list->get_num_free_regions() < target_count) { if (surplus_list->get_num_free_regions() == 0) break; added_count++; // remove one region from the surplus list heap_segment* region = surplus_list->unlink_region_front(); // and put it on the heap's free list free_list->add_region_front (region); } return added_count; } region_free_list::region_free_list() : num_free_regions (0), size_free_regions (0), size_committed_in_free_regions (0), num_free_regions_added (0), num_free_regions_removed (0), head_free_region (nullptr), tail_free_region (nullptr) { } void region_free_list::verify (bool empty_p) { #ifdef _DEBUG assert ((num_free_regions == 0) == empty_p); assert ((size_free_regions == 0) == empty_p); assert ((size_committed_in_free_regions == 0) == empty_p); assert ((head_free_region == nullptr) == empty_p); assert ((tail_free_region == nullptr) == empty_p); assert (num_free_regions == (num_free_regions_added - num_free_regions_removed)); if (!empty_p) { assert (heap_segment_next (tail_free_region) == nullptr); assert (heap_segment_prev_free_region (head_free_region) == nullptr); size_t actual_count = 0; heap_segment* last_region = nullptr; for (heap_segment* region = head_free_region; region != nullptr; region = heap_segment_next(region)) { last_region = region; actual_count++; } assert (num_free_regions == actual_count); assert (last_region == tail_free_region); heap_segment* first_region = nullptr; for (heap_segment* region = tail_free_region; region != nullptr; region = heap_segment_prev_free_region(region)) { first_region = region; actual_count--; } assert (actual_count == 0); assert (head_free_region == first_region); } #endif } void region_free_list::reset() { num_free_regions = 0; size_free_regions = 0; size_committed_in_free_regions = 0; head_free_region = nullptr; tail_free_region = nullptr; } inline void region_free_list::update_added_region_info (heap_segment* region) { num_free_regions++; num_free_regions_added++; size_t region_size = get_region_size (region); size_free_regions += region_size; size_t region_committed_size = get_region_committed_size (region); size_committed_in_free_regions += region_committed_size; verify (false); } void region_free_list::add_region_front (heap_segment* region) { assert (heap_segment_containing_free_list (region) == nullptr); heap_segment_containing_free_list(region) = this; if (head_free_region != nullptr) { heap_segment_prev_free_region(head_free_region) = region; assert (tail_free_region != nullptr); } else { tail_free_region = region; } heap_segment_next (region) = head_free_region; head_free_region = region; heap_segment_prev_free_region (region) = nullptr; update_added_region_info 
(region); } // This inserts fully committed regions at the head, otherwise it goes backward in the list till // we find a region whose committed size is >= this region's committed or we reach the head. void region_free_list::add_region_in_descending_order (heap_segment* region_to_add) { assert (heap_segment_containing_free_list (region_to_add) == nullptr); heap_segment_containing_free_list (region_to_add) = this; heap_segment_age_in_free (region_to_add) = 0; heap_segment* prev_region = nullptr; heap_segment* region = nullptr; // if the region is fully committed, it's inserted at the front if (heap_segment_committed (region_to_add) == heap_segment_reserved (region_to_add)) { region = head_free_region; } else { // otherwise we search backwards for a good insertion spot // most regions at the front are fully committed and thus boring to search size_t region_to_add_committed = get_region_committed_size (region_to_add); for (prev_region = tail_free_region; prev_region != nullptr; prev_region = heap_segment_prev_free_region (prev_region)) { size_t prev_region_committed = get_region_committed_size (prev_region); if (prev_region_committed >= region_to_add_committed) { break; } region = prev_region; } } if (prev_region != nullptr) { heap_segment_next (prev_region) = region_to_add; } else { assert (region == head_free_region); head_free_region = region_to_add; } heap_segment_prev_free_region (region_to_add) = prev_region; heap_segment_next (region_to_add) = region; if (region != nullptr) { heap_segment_prev_free_region (region) = region_to_add; } else { assert (prev_region == tail_free_region); tail_free_region = region_to_add; } update_added_region_info (region_to_add); } heap_segment* region_free_list::unlink_region_front() { heap_segment* region = head_free_region; if (region != nullptr) { assert (heap_segment_containing_free_list (region) == this); unlink_region (region); } return region; } void region_free_list::unlink_region (heap_segment* region) { region_free_list* rfl = heap_segment_containing_free_list (region); rfl->verify (false); heap_segment* prev = heap_segment_prev_free_region (region); heap_segment* next = heap_segment_next (region); if (prev != nullptr) { assert (region != rfl->head_free_region); assert (heap_segment_next (prev) == region); heap_segment_next (prev) = next; } else { assert (region == rfl->head_free_region); rfl->head_free_region = next; } if (next != nullptr) { assert (region != rfl->tail_free_region); assert (heap_segment_prev_free_region (next) == region); heap_segment_prev_free_region (next) = prev; } else { assert (region == rfl->tail_free_region); rfl->tail_free_region = prev; } heap_segment_containing_free_list (region) = nullptr; rfl->num_free_regions--; rfl->num_free_regions_removed++; size_t region_size = get_region_size (region); assert (rfl->size_free_regions >= region_size); rfl->size_free_regions -= region_size; size_t region_committed_size = get_region_committed_size (region); assert (rfl->size_committed_in_free_regions >= region_committed_size); rfl->size_committed_in_free_regions -= region_committed_size; } free_region_kind region_free_list::get_region_kind (heap_segment* region) { const size_t BASIC_REGION_SIZE = global_region_allocator.get_region_alignment(); const size_t LARGE_REGION_SIZE = global_region_allocator.get_large_region_alignment(); size_t region_size = get_region_size (region); if (region_size == BASIC_REGION_SIZE) return basic_free_region; else if (region_size == LARGE_REGION_SIZE) return large_free_region; else { assert(region_size > 
LARGE_REGION_SIZE); return huge_free_region; } } heap_segment* region_free_list::unlink_smallest_region (size_t minimum_size) { verify (num_free_regions == 0); // look for the smallest region that is large enough heap_segment* smallest_region = nullptr; size_t smallest_size = (size_t)-1; for (heap_segment* region = head_free_region; region != nullptr; region = heap_segment_next (region)) { uint8_t* region_start = get_region_start(region); uint8_t* region_end = heap_segment_reserved(region); size_t region_size = get_region_size (region); const size_t LARGE_REGION_SIZE = global_region_allocator.get_large_region_alignment(); assert (region_size >= LARGE_REGION_SIZE * 2); if (region_size >= minimum_size) { // found a region that is large enough - see if it's smaller than the smallest so far if (smallest_size > region_size) { smallest_size = region_size; smallest_region = region; } // is the region's size equal to the minimum on this list? if (region_size == LARGE_REGION_SIZE * 2) { // we won't find a smaller one on this list assert (region == smallest_region); break; } } } if (smallest_region != nullptr) { unlink_region (smallest_region); dprintf(REGIONS_LOG, ("get %Ix-%Ix-%Ix", heap_segment_mem(smallest_region), heap_segment_committed(smallest_region), heap_segment_used(smallest_region))); } return smallest_region; } void region_free_list::transfer_regions (region_free_list* from) { this->verify (this->num_free_regions == 0); from->verify (from->num_free_regions == 0); if (from->num_free_regions == 0) { // the from list is empty return; } if (num_free_regions == 0) { // this list is empty head_free_region = from->head_free_region; tail_free_region = from->tail_free_region; } else { // both free lists are non-empty // attach the from list at the tail heap_segment* this_tail = tail_free_region; heap_segment* from_head = from->head_free_region; heap_segment_next (this_tail) = from_head; heap_segment_prev_free_region (from_head) = this_tail; tail_free_region = from->tail_free_region; } for (heap_segment* region = from->head_free_region; region != nullptr; region = heap_segment_next (region)) { heap_segment_containing_free_list (region) = this; } num_free_regions += from->num_free_regions; num_free_regions_added += from->num_free_regions; size_free_regions += from->size_free_regions; size_committed_in_free_regions += from->size_committed_in_free_regions; from->num_free_regions_removed += from->num_free_regions; from->reset(); verify (false); } size_t region_free_list::get_num_free_regions() { #ifdef _DEBUG verify (num_free_regions == 0); #endif //_DEBUG return num_free_regions; } void region_free_list::add_region (heap_segment* region, region_free_list to_free_list[count_free_region_kinds]) { free_region_kind kind = get_region_kind (region); to_free_list[kind].add_region_front (region); } void region_free_list::add_region_descending (heap_segment* region, region_free_list to_free_list[count_free_region_kinds]) { free_region_kind kind = get_region_kind (region); to_free_list[kind].add_region_in_descending_order (region); } void region_free_list::age_free_regions() { for (heap_segment* region = head_free_region; region != nullptr; region = heap_segment_next (region)) { // only age to 99... that's enough for us to decommit this. 
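        // (the age saturates at MAX_AGE_IN_FREE instead of wrapping; distribute_free_regions
        // moves a free region to the decommit list once its age reaches AGE_IN_FREE_TO_DECOMMIT)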
if (heap_segment_age_in_free (region) < MAX_AGE_IN_FREE) heap_segment_age_in_free (region)++; } } void region_free_list::age_free_regions (region_free_list free_lists[count_free_region_kinds]) { for (int kind = basic_free_region; kind < count_free_region_kinds; kind++) { free_lists[kind].age_free_regions(); } } void region_free_list::print (int hn, const char* msg, int* ages) { dprintf (3, ("h%2d PRINTING-------------------------------", hn)); for (heap_segment* region = head_free_region; region != nullptr; region = heap_segment_next (region)) { if (ages) { ages[heap_segment_age_in_free (region)]++; } dprintf (3, ("[%s] h%2d age %d region %Ix (%Id)%s", msg, hn, (int)heap_segment_age_in_free (region), heap_segment_mem (region), get_region_committed_size (region), ((heap_segment_committed (region) == heap_segment_reserved (region)) ? "(FC)" : ""))); } dprintf (3, ("h%2d PRINTING END-------------------------------", hn)); } void region_free_list::print (region_free_list free_lists[count_free_region_kinds], int hn, const char* msg, int* ages) { for (int kind = basic_free_region; kind < count_free_region_kinds; kind++) { free_lists[kind].print (hn, msg, ages); } } static int compare_by_committed_and_age (heap_segment* l, heap_segment* r) { size_t l_committed = get_region_committed_size (l); size_t r_committed = get_region_committed_size (r); if (l_committed > r_committed) return -1; else if (l_committed < r_committed) return 1; int l_age = heap_segment_age_in_free (l); int r_age = heap_segment_age_in_free (r); return (l_age - r_age); } static heap_segment* merge_sort_by_committed_and_age (heap_segment *head, size_t count) { if (count <= 1) return head; size_t half = count / 2; heap_segment* mid = nullptr; size_t i = 0; for (heap_segment *region = head; region != nullptr; region = heap_segment_next (region)) { i++; if (i == half) { mid = heap_segment_next (region); heap_segment_next (region) = nullptr; break; } } head = merge_sort_by_committed_and_age (head, half); mid = merge_sort_by_committed_and_age (mid, count - half); heap_segment* new_head; if (compare_by_committed_and_age (head, mid) <= 0) { new_head = head; head = heap_segment_next (head); } else { new_head = mid; mid = heap_segment_next (mid); } heap_segment* new_tail = new_head; while ((head != nullptr) && (mid != nullptr)) { heap_segment* region = nullptr; if (compare_by_committed_and_age (head, mid) <= 0) { region = head; head = heap_segment_next (head); } else { region = mid; mid = heap_segment_next (mid); } heap_segment_next (new_tail) = region; new_tail = region; } if (head != nullptr) { assert (mid == nullptr); heap_segment_next (new_tail) = head; } else { heap_segment_next (new_tail) = mid; } return new_head; } void region_free_list::sort_by_committed_and_age() { if (num_free_regions <= 1) return; heap_segment* new_head = merge_sort_by_committed_and_age (head_free_region, num_free_regions); // need to set head, tail, and all the prev links again head_free_region = new_head; heap_segment* prev = nullptr; for (heap_segment* region = new_head; region != nullptr; region = heap_segment_next (region)) { heap_segment_prev_free_region (region) = prev; assert ((prev == nullptr) || (compare_by_committed_and_age (prev, region) <= 0)); prev = region; } tail_free_region = prev; } #endif //USE_REGIONS void gc_heap::distribute_free_regions() { #ifdef USE_REGIONS const int kind_count = large_free_region + 1; // first step: accumulate the number of free regions and the budget over all heaps // and move huge regions to global free list size_t 
total_num_free_regions[kind_count] = { 0, 0 }; size_t total_budget_in_region_units[kind_count] = { 0, 0 }; size_t num_decommit_regions_by_time = 0; size_t size_decommit_regions_by_time = 0; size_t heap_budget_in_region_units[MAX_SUPPORTED_CPUS][kind_count]; size_t region_size[kind_count] = { global_region_allocator.get_region_alignment(), global_region_allocator.get_large_region_alignment() }; region_free_list surplus_regions[kind_count]; for (int kind = basic_free_region; kind < kind_count; kind++) { // we may still have regions left on the regions_to_decommit list - // use these to fill the budget as well surplus_regions[kind].transfer_regions (&global_regions_to_decommit[kind]); } #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { gc_heap* hp = g_heaps[i]; #else //MULTIPLE_HEAPS { gc_heap* hp = pGenGCHeap; // just to reduce the number of #ifdefs in the code below const int i = 0; #endif //MULTIPLE_HEAPS for (int kind = basic_free_region; kind < kind_count; kind++) { // If there are regions in free that haven't been used in AGE_IN_FREE_TO_DECOMMIT GCs we always decommit them. region_free_list& region_list = hp->free_regions[kind]; heap_segment* next_region = nullptr; for (heap_segment* region = region_list.get_first_free_region(); region != nullptr; region = next_region) { next_region = heap_segment_next (region); if (heap_segment_age_in_free (region) >= AGE_IN_FREE_TO_DECOMMIT) { num_decommit_regions_by_time++; size_decommit_regions_by_time += get_region_committed_size (region); dprintf (REGIONS_LOG, ("h%2d region %Ix age %2d, decommit", i, heap_segment_mem (region), heap_segment_age_in_free (region))); region_free_list::unlink_region (region); region_free_list::add_region (region, global_regions_to_decommit); } } total_num_free_regions[kind] += region_list.get_num_free_regions(); } global_free_huge_regions.transfer_regions (&hp->free_regions[huge_free_region]); heap_budget_in_region_units[i][basic_free_region] = 0; heap_budget_in_region_units[i][large_free_region] = 0; for (int gen = soh_gen0; gen < total_generation_count; gen++) { ptrdiff_t budget_gen = max (hp->estimate_gen_growth (gen), 0); int kind = gen >= loh_generation; size_t budget_gen_in_region_units = (budget_gen + (region_size[kind] - 1)) / region_size[kind]; dprintf (REGIONS_LOG, ("h%2d gen %d has an estimated growth of %Id bytes (%Id regions)", i, gen, budget_gen, budget_gen_in_region_units)); heap_budget_in_region_units[i][kind] += budget_gen_in_region_units; total_budget_in_region_units[kind] += budget_gen_in_region_units; } } dprintf (1, ("moved %2d regions (%8Id) to decommit based on time", num_decommit_regions_by_time, size_decommit_regions_by_time)); global_free_huge_regions.transfer_regions (&global_regions_to_decommit[huge_free_region]); size_t free_space_in_huge_regions = global_free_huge_regions.get_size_free_regions(); ptrdiff_t num_regions_to_decommit[kind_count]; int region_factor[kind_count] = { 1, LARGE_REGION_FACTOR }; #ifdef TRACE_GC const char* kind_name[count_free_region_kinds] = { "basic", "large", "huge"}; #endif // TRACE_GC #ifndef MULTIPLE_HEAPS // just to reduce the number of #ifdefs in the code below const int n_heaps = 1; #endif //!MULTIPLE_HEAPS size_t num_huge_region_units_to_consider[kind_count] = { 0, free_space_in_huge_regions / region_size[large_free_region] }; for (int kind = basic_free_region; kind < kind_count; kind++) { num_regions_to_decommit[kind] = surplus_regions[kind].get_num_free_regions(); dprintf(REGIONS_LOG, ("%Id %s free regions, %Id regions budget, %Id regions on 
decommit list, %Id huge regions to consider", total_num_free_regions[kind], kind_name[kind], total_budget_in_region_units[kind], num_regions_to_decommit[kind], num_huge_region_units_to_consider[kind])); // check if the free regions exceed the budget // if so, put the highest free regions on the decommit list total_num_free_regions[kind] += num_regions_to_decommit[kind]; ptrdiff_t balance = total_num_free_regions[kind] + num_huge_region_units_to_consider[kind] - total_budget_in_region_units[kind]; if ( #ifdef BACKGROUND_GC background_running_p() || #endif (balance < 0)) { dprintf (REGIONS_LOG, ("distributing the %Id %s regions deficit", -balance, kind_name[kind])); // we may have a deficit or - if background GC is going on - a surplus. // adjust the budget per heap accordingly ptrdiff_t adjustment_per_heap = (balance + (n_heaps - 1)) / n_heaps; #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { ptrdiff_t new_budget = (ptrdiff_t)heap_budget_in_region_units[i][kind] + adjustment_per_heap; heap_budget_in_region_units[i][kind] = max (0, new_budget); } #endif //MULTIPLE_HEAPS } else { num_regions_to_decommit[kind] = balance; dprintf(REGIONS_LOG, ("distributing the %Id %s regions, removing %Id regions", total_budget_in_region_units[kind], kind_name[kind], num_regions_to_decommit[kind])); if (num_regions_to_decommit[kind] > 0) { // put the highest regions on the decommit list global_region_allocator.move_highest_free_regions (num_regions_to_decommit[kind]*region_factor[kind], kind == basic_free_region, global_regions_to_decommit); dprintf (REGIONS_LOG, ("Moved %Id %s regions to decommit list", global_regions_to_decommit[kind].get_num_free_regions(), kind_name[kind])); if (kind == basic_free_region) { assert (global_regions_to_decommit[kind].get_num_free_regions() == (size_t)num_regions_to_decommit[kind]); } else { dprintf (REGIONS_LOG, ("Moved %Id %s regions to decommit list", global_regions_to_decommit[huge_free_region].get_num_free_regions(), kind_name[huge_free_region])); // cannot assert we moved any regions because there may be a single huge region with more than we want to decommit } } } } for (int kind = basic_free_region; kind < kind_count; kind++) { #ifdef MULTIPLE_HEAPS // now go through all the heaps and remove any free regions above the target count for (int i = 0; i < n_heaps; i++) { gc_heap* hp = g_heaps[i]; if (hp->free_regions[kind].get_num_free_regions() > heap_budget_in_region_units[i][kind]) { dprintf (REGIONS_LOG, ("removing %Id %s regions from heap %d with %Id regions", hp->free_regions[kind].get_num_free_regions() - heap_budget_in_region_units[i][kind], kind_name[kind], i, hp->free_regions[kind].get_num_free_regions())); remove_surplus_regions (&hp->free_regions[kind], &surplus_regions[kind], heap_budget_in_region_units[i][kind]); } } // finally go through all the heaps and distribute any surplus regions to heaps having too few free regions for (int i = 0; i < n_heaps; i++) { gc_heap* hp = g_heaps[i]; #else //MULTIPLE_HEAPS { gc_heap* hp = pGenGCHeap; const int i = 0; #endif //MULTIPLE_HEAPS if (hp->free_regions[kind].get_num_free_regions() < heap_budget_in_region_units[i][kind]) { int64_t num_added_regions = add_regions (&hp->free_regions[kind], &surplus_regions[kind], heap_budget_in_region_units[i][kind]); dprintf (REGIONS_LOG, ("added %Id %s regions to heap %d - now has %Id", num_added_regions, kind_name[kind], i, hp->free_regions[kind].get_num_free_regions())); } hp->free_regions[kind].sort_by_committed_and_age(); } if (surplus_regions[kind].get_num_free_regions() 
> 0) { assert (!"should have exhausted the surplus_regions"); global_regions_to_decommit[kind].transfer_regions (&surplus_regions[kind]); } } #ifdef MULTIPLE_HEAPS for (int kind = basic_free_region; kind < count_free_region_kinds; kind++) { if (global_regions_to_decommit[kind].get_num_free_regions() != 0) { gradual_decommit_in_progress_p = TRUE; break; } } #else //MULTIPLE_HEAPS while (decommit_step()) { } #endif //MULTIPLE_HEAPS #endif //USE_REGIONS } #ifdef WRITE_WATCH uint8_t* g_addresses [array_size+2]; // to get around the bug in GetWriteWatch #ifdef CARD_BUNDLE inline void gc_heap::verify_card_bundle_bits_set(size_t first_card_word, size_t last_card_word) { #ifdef _DEBUG for (size_t x = cardw_card_bundle (first_card_word); x < cardw_card_bundle (last_card_word); x++) { if (!card_bundle_set_p (x)) { assert (!"Card bundle not set"); dprintf (3, ("Card bundle %Ix not set", x)); } } #else UNREFERENCED_PARAMETER(first_card_word); UNREFERENCED_PARAMETER(last_card_word); #endif } // Verifies that any bundles that are not set represent only cards that are not set. inline void gc_heap::verify_card_bundles() { #ifdef _DEBUG size_t lowest_card = card_word (card_of (lowest_address)); #ifdef USE_REGIONS size_t highest_card = card_word (card_of (global_region_allocator.get_left_used_unsafe())); #else size_t highest_card = card_word (card_of (highest_address)); #endif size_t cardb = cardw_card_bundle (lowest_card); size_t end_cardb = cardw_card_bundle (align_cardw_on_bundle (highest_card)); while (cardb < end_cardb) { uint32_t* card_word = &card_table[max(card_bundle_cardw (cardb), lowest_card)]; uint32_t* card_word_end = &card_table[min(card_bundle_cardw (cardb+1), highest_card)]; if (card_bundle_set_p (cardb) == 0) { // Verify that no card is set while (card_word < card_word_end) { if (*card_word != 0) { dprintf (3, ("gc: %d, Card word %Ix for address %Ix set, card_bundle %Ix clear", dd_collection_count (dynamic_data_of (0)), (size_t)(card_word-&card_table[0]), (size_t)(card_address ((size_t)(card_word-&card_table[0]) * card_word_width)), cardb)); } assert((*card_word)==0); card_word++; } } cardb++; } #endif } // If card bundles are enabled, use write watch to find pages in the card table that have // been dirtied, and set the corresponding card bundle bits. 
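// The card table pages are probed with GetWriteWatch, retrieving at most array_size dirty page
// addresses per call and continuing from where the previous call left off; each dirty card-table
// page is translated back to the card words it covers, and the bundle bits spanning those words
// are set.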
void gc_heap::update_card_table_bundle() { if (card_bundles_enabled()) { // The address of the card word containing the card representing the lowest heap address uint8_t* base_address = (uint8_t*)(&card_table[card_word (card_of (lowest_address))]); // The address of the card word containing the card representing the highest heap address #ifdef USE_REGIONS uint8_t* high_address = (uint8_t*)(&card_table[card_word (card_of (global_region_allocator.get_left_used_unsafe()))]); #else uint8_t* high_address = (uint8_t*)(&card_table[card_word (card_of (highest_address))]); #endif //USE_REGIONS uint8_t* saved_base_address = base_address; uintptr_t bcount = array_size; size_t saved_region_size = align_on_page (high_address) - saved_base_address; do { size_t region_size = align_on_page (high_address) - base_address; dprintf (3,("Probing card table pages [%Ix, %Ix[", (size_t)base_address, (size_t)(base_address + region_size))); bool success = GCToOSInterface::GetWriteWatch(false /* resetState */, base_address, region_size, (void**)g_addresses, &bcount); assert (success && "GetWriteWatch failed!"); dprintf (3,("Found %d pages written", bcount)); for (unsigned i = 0; i < bcount; i++) { // Offset of the dirty page from the start of the card table (clamped to base_address) size_t bcardw = (uint32_t*)(max(g_addresses[i],base_address)) - &card_table[0]; // Offset of the end of the page from the start of the card table (clamped to high addr) size_t ecardw = (uint32_t*)(min(g_addresses[i]+OS_PAGE_SIZE, high_address)) - &card_table[0]; assert (bcardw >= card_word (card_of (g_gc_lowest_address))); // Set the card bundle bits representing the dirty card table page card_bundles_set (cardw_card_bundle (bcardw), cardw_card_bundle (align_cardw_on_bundle (ecardw))); dprintf (3,("Set Card bundle [%Ix, %Ix[", cardw_card_bundle (bcardw), cardw_card_bundle (align_cardw_on_bundle (ecardw)))); verify_card_bundle_bits_set(bcardw, ecardw); } if (bcount >= array_size) { base_address = g_addresses [array_size-1] + OS_PAGE_SIZE; bcount = array_size; } } while ((bcount >= array_size) && (base_address < high_address)); // Now that we've updated the card bundle bits, reset the write-tracking state. 
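        // The reset is done once here for the whole probed span
        // [saved_base_address, saved_base_address + saved_region_size) because GetWriteWatch
        // above was called with resetState == false.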
GCToOSInterface::ResetWriteWatch (saved_base_address, saved_region_size); } } #endif //CARD_BUNDLE #ifdef BACKGROUND_GC // static void gc_heap::reset_write_watch_for_gc_heap(void* base_address, size_t region_size) { #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP SoftwareWriteWatch::ClearDirty(base_address, region_size); #else // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP GCToOSInterface::ResetWriteWatch(base_address, region_size); #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP } // static void gc_heap::get_write_watch_for_gc_heap(bool reset, void *base_address, size_t region_size, void** dirty_pages, uintptr_t* dirty_page_count_ref, bool is_runtime_suspended) { #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP SoftwareWriteWatch::GetDirty(base_address, region_size, dirty_pages, dirty_page_count_ref, reset, is_runtime_suspended); #else // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP UNREFERENCED_PARAMETER(is_runtime_suspended); bool success = GCToOSInterface::GetWriteWatch(reset, base_address, region_size, dirty_pages, dirty_page_count_ref); assert(success); #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP } const size_t ww_reset_quantum = 128*1024*1024; inline void gc_heap::switch_one_quantum() { enable_preemptive (); GCToOSInterface::Sleep (1); disable_preemptive (true); } void gc_heap::reset_ww_by_chunk (uint8_t* start_address, size_t total_reset_size) { size_t reset_size = 0; size_t remaining_reset_size = 0; size_t next_reset_size = 0; while (reset_size != total_reset_size) { remaining_reset_size = total_reset_size - reset_size; next_reset_size = ((remaining_reset_size >= ww_reset_quantum) ? ww_reset_quantum : remaining_reset_size); if (next_reset_size) { reset_write_watch_for_gc_heap(start_address, next_reset_size); reset_size += next_reset_size; switch_one_quantum(); } } assert (reset_size == total_reset_size); } // This does a Sleep(1) for every reset ww_reset_quantum bytes of reset // we do concurrently. void gc_heap::switch_on_reset (BOOL concurrent_p, size_t* current_total_reset_size, size_t last_reset_size) { if (concurrent_p) { *current_total_reset_size += last_reset_size; dprintf (2, ("reset %Id bytes so far", *current_total_reset_size)); if (*current_total_reset_size > ww_reset_quantum) { switch_one_quantum(); *current_total_reset_size = 0; } } } void gc_heap::reset_write_watch (BOOL concurrent_p) { #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP // Software write watch currently requires the runtime to be suspended during reset. // See SoftwareWriteWatch::ClearDirty(). assert(!concurrent_p); #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP dprintf (2, ("bgc lowest: %Ix, bgc highest: %Ix", background_saved_lowest_address, background_saved_highest_address)); size_t reset_size = 0; for (int i = get_start_generation_index(); i < total_generation_count; i++) { heap_segment* seg = heap_segment_rw (generation_start_segment (generation_of (i))); while (seg) { uint8_t* base_address = align_lower_page (heap_segment_mem (seg)); base_address = max (base_address, background_saved_lowest_address); uint8_t* high_address = ((seg == ephemeral_heap_segment) ? 
alloc_allocated : heap_segment_allocated (seg)); high_address = min (high_address, background_saved_highest_address); if (base_address < high_address) { size_t reset_size = 0; size_t region_size = high_address - base_address; dprintf (3, ("h%d, gen: %Ix, ww: [%Ix(%Id)", heap_number, i, (size_t)base_address, region_size)); //reset_ww_by_chunk (base_address, region_size); reset_write_watch_for_gc_heap(base_address, region_size); switch_on_reset (concurrent_p, &reset_size, region_size); } seg = heap_segment_next_rw (seg); concurrent_print_time_delta (i == max_generation ? "CRWW soh": "CRWW uoh"); } } } #endif //BACKGROUND_GC #endif //WRITE_WATCH #ifdef BACKGROUND_GC void gc_heap::restart_vm() { //assert (generation_allocation_pointer (youngest_generation) == 0); dprintf (3, ("Restarting EE")); STRESS_LOG0(LF_GC, LL_INFO10000, "Concurrent GC: Restarting EE\n"); ee_proceed_event.Set(); } inline void fire_alloc_wait_event (alloc_wait_reason awr, BOOL begin_p) { if (awr != awr_ignored) { if (begin_p) { FIRE_EVENT(BGCAllocWaitBegin, awr); } else { FIRE_EVENT(BGCAllocWaitEnd, awr); } } } void gc_heap::fire_alloc_wait_event_begin (alloc_wait_reason awr) { fire_alloc_wait_event (awr, TRUE); } void gc_heap::fire_alloc_wait_event_end (alloc_wait_reason awr) { fire_alloc_wait_event (awr, FALSE); } #endif //BACKGROUND_GC void gc_heap::make_generation (int gen_num, heap_segment* seg, uint8_t* start) { generation* gen = generation_of (gen_num); gen->gen_num = gen_num; #ifndef USE_REGIONS gen->allocation_start = start; gen->plan_allocation_start = 0; #endif //USE_REGIONS gen->allocation_context.alloc_ptr = 0; gen->allocation_context.alloc_limit = 0; gen->allocation_context.alloc_bytes = 0; gen->allocation_context.alloc_bytes_uoh = 0; gen->allocation_context_start_region = 0; gen->start_segment = seg; #ifdef USE_REGIONS dprintf (REGIONS_LOG, ("g%d start seg is %Ix-%Ix", gen_num, (size_t)seg, heap_segment_mem (seg))); gen->tail_region = seg; gen->plan_start_segment = 0; gen->tail_ro_region = 0; #endif //USE_REGIONS gen->allocation_segment = seg; gen->free_list_space = 0; gen->pinned_allocated = 0; gen->free_list_allocated = 0; gen->end_seg_allocated = 0; gen->condemned_allocated = 0; gen->sweep_allocated = 0; gen->free_obj_space = 0; gen->allocation_size = 0; gen->pinned_allocation_sweep_size = 0; gen->pinned_allocation_compact_size = 0; gen->allocate_end_seg_p = FALSE; gen->free_list_allocator.clear(); #ifdef DOUBLY_LINKED_FL gen->set_bgc_mark_bit_p = FALSE; #endif //DOUBLY_LINKED_FL #ifdef FREE_USAGE_STATS memset (gen->gen_free_spaces, 0, sizeof (gen->gen_free_spaces)); memset (gen->gen_current_pinned_free_spaces, 0, sizeof (gen->gen_current_pinned_free_spaces)); memset (gen->gen_plugs, 0, sizeof (gen->gen_plugs)); #endif //FREE_USAGE_STATS } void gc_heap::adjust_ephemeral_limits () { #ifndef USE_REGIONS ephemeral_low = generation_allocation_start (generation_of (max_generation - 1)); ephemeral_high = heap_segment_reserved (ephemeral_heap_segment); dprintf (3, ("new ephemeral low: %Ix new ephemeral high: %Ix", (size_t)ephemeral_low, (size_t)ephemeral_high)) #ifndef MULTIPLE_HEAPS // This updates the write barrier helpers with the new info. 
stomp_write_barrier_ephemeral(ephemeral_low, ephemeral_high); #endif // MULTIPLE_HEAPS #endif //USE_REGIONS } #if defined(TRACE_GC) || defined(GC_CONFIG_DRIVEN) FILE* CreateLogFile(const GCConfigStringHolder& temp_logfile_name, bool is_config) { FILE* logFile; if (!temp_logfile_name.Get()) { return nullptr; } char logfile_name[MAX_LONGPATH+1]; //uint32_t pid = GCToOSInterface::GetCurrentProcessId(); const char* suffix = is_config ? ".config.log" : ".log"; //_snprintf_s(logfile_name, MAX_LONGPATH+1, _TRUNCATE, "%s.%d%s", temp_logfile_name.Get(), pid, suffix); _snprintf_s(logfile_name, MAX_LONGPATH+1, _TRUNCATE, "%s%s", temp_logfile_name.Get(), suffix); logFile = fopen(logfile_name, "wb"); return logFile; } #endif //TRACE_GC || GC_CONFIG_DRIVEN size_t gc_heap::get_segment_size_hard_limit (uint32_t* num_heaps, bool should_adjust_num_heaps) { assert (heap_hard_limit); size_t aligned_hard_limit = align_on_segment_hard_limit (heap_hard_limit); if (should_adjust_num_heaps) { uint32_t max_num_heaps = (uint32_t)(aligned_hard_limit / min_segment_size_hard_limit); if (*num_heaps > max_num_heaps) { *num_heaps = max_num_heaps; } } size_t seg_size = aligned_hard_limit / *num_heaps; size_t aligned_seg_size = (use_large_pages_p ? align_on_segment_hard_limit (seg_size) : round_up_power2 (seg_size)); assert (g_theGCHeap->IsValidSegmentSize (aligned_seg_size)); size_t seg_size_from_config = (size_t)GCConfig::GetSegmentSize(); if (seg_size_from_config) { size_t aligned_seg_size_config = (use_large_pages_p ? align_on_segment_hard_limit (seg_size) : round_up_power2 (seg_size_from_config)); aligned_seg_size = max (aligned_seg_size, aligned_seg_size_config); } //printf ("limit: %Idmb, aligned: %Idmb, %d heaps, seg size from config: %Idmb, seg size %Idmb", // (heap_hard_limit / 1024 / 1024), // (aligned_hard_limit / 1024 / 1024), // *num_heaps, // (seg_size_from_config / 1024 / 1024), // (aligned_seg_size / 1024 / 1024)); return aligned_seg_size; } #ifdef USE_REGIONS bool allocate_initial_regions(int number_of_heaps) { initial_regions = new (nothrow) uint8_t*[number_of_heaps][total_generation_count][2]; if (initial_regions == nullptr) { return false; } for (int i = 0; i < number_of_heaps; i++) { bool succeed = global_region_allocator.allocate_large_region( &initial_regions[i][poh_generation][0], &initial_regions[i][poh_generation][1], allocate_forward, 0, nullptr); assert(succeed); } for (int i = 0; i < number_of_heaps; i++) { for (int gen = max_generation; gen >= 0; gen--) { bool succeed = global_region_allocator.allocate_basic_region( &initial_regions[i][gen][0], &initial_regions[i][gen][1], nullptr); assert(succeed); } } for (int i = 0; i < number_of_heaps; i++) { bool succeed = global_region_allocator.allocate_large_region( &initial_regions[i][loh_generation][0], &initial_regions[i][loh_generation][1], allocate_forward, 0, nullptr); assert(succeed); } return true; } #endif HRESULT gc_heap::initialize_gc (size_t soh_segment_size, size_t loh_segment_size, size_t poh_segment_size #ifdef MULTIPLE_HEAPS ,int number_of_heaps #endif //MULTIPLE_HEAPS ) { #ifdef TRACE_GC if (GCConfig::GetLogEnabled()) { gc_log = CreateLogFile(GCConfig::GetLogFile(), false); if (gc_log == NULL) return E_FAIL; // GCLogFileSize in MBs. 
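        // The configured value must be in (0, 500]; it also determines max_gc_buffers below
        // (gc_log_file_size * 1024 * 1024 / gc_log_buffer_size).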
gc_log_file_size = static_cast<size_t>(GCConfig::GetLogFileSize()); if (gc_log_file_size <= 0 || gc_log_file_size > 500) { fclose (gc_log); return E_FAIL; } gc_log_lock.Initialize(); gc_log_buffer = new (nothrow) uint8_t [gc_log_buffer_size]; if (!gc_log_buffer) { fclose(gc_log); return E_FAIL; } memset (gc_log_buffer, '*', gc_log_buffer_size); max_gc_buffers = gc_log_file_size * 1024 * 1024 / gc_log_buffer_size; } #endif // TRACE_GC #ifdef GC_CONFIG_DRIVEN if (GCConfig::GetConfigLogEnabled()) { gc_config_log = CreateLogFile(GCConfig::GetConfigLogFile(), true); if (gc_config_log == NULL) return E_FAIL; gc_config_log_buffer = new (nothrow) uint8_t [gc_config_log_buffer_size]; if (!gc_config_log_buffer) { fclose(gc_config_log); return E_FAIL; } compact_ratio = static_cast<int>(GCConfig::GetCompactRatio()); // h# | GC | gen | C | EX | NF | BF | ML | DM || PreS | PostS | Merge | Conv | Pre | Post | PrPo | PreP | PostP | cprintf (("%2s | %6s | %1s | %1s | %2s | %2s | %2s | %2s | %2s || %5s | %5s | %5s | %5s | %5s | %5s | %5s | %5s | %5s |", "h#", // heap index "GC", // GC index "g", // generation "C", // compaction (empty means sweeping), 'M' means it was mandatory, 'W' means it was not "EX", // heap expansion "NF", // normal fit "BF", // best fit (if it indicates neither NF nor BF it means it had to acquire a new seg. "ML", // mark list "DM", // demotion "PreS", // short object before pinned plug "PostS", // short object after pinned plug "Merge", // merged pinned plugs "Conv", // converted to pinned plug "Pre", // plug before pinned plug but not after "Post", // plug after pinned plug but not before "PrPo", // plug both before and after pinned plug "PreP", // pre short object padded "PostP" // post short object padded )); } #endif //GC_CONFIG_DRIVEN HRESULT hres = S_OK; #ifdef WRITE_WATCH hardware_write_watch_api_supported(); #ifdef BACKGROUND_GC if (can_use_write_watch_for_gc_heap() && GCConfig::GetConcurrentGC()) { gc_can_use_concurrent = true; #ifndef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP virtual_alloc_hardware_write_watch = true; #endif // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP } else { gc_can_use_concurrent = false; } #endif //BACKGROUND_GC #endif //WRITE_WATCH #ifdef BACKGROUND_GC // leave the first page to contain only segment info // because otherwise we could need to revisit the first page frequently in // background GC. segment_info_size = OS_PAGE_SIZE; #else segment_info_size = Align (sizeof (heap_segment), get_alignment_constant (FALSE)); #endif //BACKGROUND_GC reserved_memory = 0; size_t initial_heap_size = soh_segment_size + loh_segment_size + poh_segment_size; uint16_t* heap_no_to_numa_node = nullptr; #ifdef MULTIPLE_HEAPS reserved_memory_limit = initial_heap_size * number_of_heaps; if (!heap_select::init(number_of_heaps)) return E_OUTOFMEMORY; if (GCToOSInterface::CanEnableGCNumaAware()) heap_no_to_numa_node = heap_select::heap_no_to_numa_node; #else //MULTIPLE_HEAPS reserved_memory_limit = initial_heap_size; int number_of_heaps = 1; #endif //MULTIPLE_HEAPS if (heap_hard_limit) { check_commit_cs.Initialize(); } #ifdef USE_REGIONS if (regions_range) { // REGIONS TODO: we should reserve enough space at the end of what we reserved that's // big enough to accommodate if we were to materialize all the GC bookkeeping datastructures. // We only need to commit what we use and just need to commit more instead of having to // relocate the exising table and then calling copy_brick_card_table. 
// Right now all the non mark array portions are commmitted since I'm calling mark_card_table // on the whole range. This can be committed as needed. size_t reserve_size = regions_range; uint8_t* reserve_range = (uint8_t*)virtual_alloc (reserve_size, use_large_pages_p); if (!reserve_range) return E_OUTOFMEMORY; if (!global_region_allocator.init (reserve_range, (reserve_range + reserve_size), ((size_t)1 << min_segment_size_shr), &g_gc_lowest_address, &g_gc_highest_address)) return E_OUTOFMEMORY; bookkeeping_covered_start = global_region_allocator.get_start(); if (!allocate_initial_regions(number_of_heaps)) return E_OUTOFMEMORY; } else { assert (!"cannot use regions without specifying the range!!!"); return E_FAIL; } #else //USE_REGIONS bool separated_poh_p = use_large_pages_p && heap_hard_limit_oh[soh] && (GCConfig::GetGCHeapHardLimitPOH() == 0) && (GCConfig::GetGCHeapHardLimitPOHPercent() == 0); if (!reserve_initial_memory (soh_segment_size, loh_segment_size, poh_segment_size, number_of_heaps, use_large_pages_p, separated_poh_p, heap_no_to_numa_node)) return E_OUTOFMEMORY; if (separated_poh_p) { heap_hard_limit_oh[poh] = min_segment_size_hard_limit * number_of_heaps; heap_hard_limit += heap_hard_limit_oh[poh]; } #endif //USE_REGIONS #ifdef CARD_BUNDLE //check if we need to turn on card_bundles. #ifdef MULTIPLE_HEAPS // use INT64 arithmetic here because of possible overflow on 32p uint64_t th = (uint64_t)MH_TH_CARD_BUNDLE*number_of_heaps; #else // use INT64 arithmetic here because of possible overflow on 32p uint64_t th = (uint64_t)SH_TH_CARD_BUNDLE; #endif //MULTIPLE_HEAPS if (can_use_write_watch_for_card_table() && reserved_memory >= th) { settings.card_bundles = TRUE; } else { settings.card_bundles = FALSE; } #endif //CARD_BUNDLE settings.first_init(); int latency_level_from_config = static_cast<int>(GCConfig::GetLatencyLevel()); if (latency_level_from_config >= latency_level_first && latency_level_from_config <= latency_level_last) { gc_heap::latency_level = static_cast<gc_latency_level>(latency_level_from_config); } init_static_data(); g_gc_card_table = make_card_table (g_gc_lowest_address, g_gc_highest_address); if (!g_gc_card_table) return E_OUTOFMEMORY; gc_started = FALSE; #ifdef MULTIPLE_HEAPS g_heaps = new (nothrow) gc_heap* [number_of_heaps]; if (!g_heaps) return E_OUTOFMEMORY; #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable:22011) // Suppress PREFast warning about integer underflow/overflow #endif // _PREFAST_ #if !defined(USE_REGIONS) || defined(_DEBUG) g_promoted = new (nothrow) size_t [number_of_heaps*16]; if (!g_promoted) return E_OUTOFMEMORY; #endif //!USE_REGIONS || _DEBUG #ifdef BACKGROUND_GC g_bpromoted = new (nothrow) size_t [number_of_heaps*16]; if (!g_bpromoted) return E_OUTOFMEMORY; #endif #ifdef MH_SC_MARK g_mark_stack_busy = new (nothrow) int[(number_of_heaps+2)*HS_CACHE_LINE_SIZE/sizeof(int)]; #endif //MH_SC_MARK #ifdef _PREFAST_ #pragma warning(pop) #endif // _PREFAST_ #ifdef MH_SC_MARK if (!g_mark_stack_busy) return E_OUTOFMEMORY; #endif //MH_SC_MARK if (!create_thread_support (number_of_heaps)) return E_OUTOFMEMORY; #endif //MULTIPLE_HEAPS #ifdef MULTIPLE_HEAPS yp_spin_count_unit = 32 * number_of_heaps; #else yp_spin_count_unit = 32 * g_num_processors; #endif //MULTIPLE_HEAPS #if defined(__linux__) GCToEEInterface::UpdateGCEventStatus(static_cast<int>(GCEventStatus::GetEnabledLevel(GCEventProvider_Default)), static_cast<int>(GCEventStatus::GetEnabledKeywords(GCEventProvider_Default)), 
static_cast<int>(GCEventStatus::GetEnabledLevel(GCEventProvider_Private)), static_cast<int>(GCEventStatus::GetEnabledKeywords(GCEventProvider_Private))); #endif // __linux__ #ifdef USE_VXSORT InitSupportedInstructionSet ((int32_t)GCConfig::GetGCEnabledInstructionSets()); #endif if (!init_semi_shared()) { hres = E_FAIL; } return hres; } //Initializes PER_HEAP_ISOLATED data members. int gc_heap::init_semi_shared() { int ret = 0; #ifdef BGC_SERVO_TUNING uint32_t current_memory_load = 0; uint32_t sweep_flr_goal = 0; uint32_t sweep_flr_goal_loh = 0; #endif //BGC_SERVO_TUNING // This is used for heap expansion - it's to fix exactly the start for gen 0 // through (max_generation-1). When we expand the heap we allocate all these // gen starts at the beginning of the new ephemeral seg. eph_gen_starts_size = (Align (min_obj_size)) * max_generation; #ifdef MULTIPLE_HEAPS mark_list_size = min (100*1024, max (8192, soh_segment_size/(2*10*32))); g_mark_list = make_mark_list (mark_list_size*n_heaps); min_balance_threshold = alloc_quantum_balance_units * CLR_SIZE * 2; g_mark_list_copy = make_mark_list (mark_list_size*n_heaps); if (!g_mark_list_copy) { goto cleanup; } #else //MULTIPLE_HEAPS mark_list_size = max (8192, soh_segment_size/(64*32)); g_mark_list = make_mark_list (mark_list_size); #endif //MULTIPLE_HEAPS dprintf (3, ("mark_list_size: %d", mark_list_size)); if (!g_mark_list) { goto cleanup; } #ifdef MULTIPLE_HEAPS // gradual decommit: set size to some reasonable value per time interval max_decommit_step_size = ((DECOMMIT_SIZE_PER_MILLISECOND * DECOMMIT_TIME_STEP_MILLISECONDS) / n_heaps); // but do at least MIN_DECOMMIT_SIZE per step to make the OS call worthwhile max_decommit_step_size = max (max_decommit_step_size, MIN_DECOMMIT_SIZE); #endif //MULTIPLE_HEAPS #ifdef FEATURE_BASICFREEZE seg_table = sorted_table::make_sorted_table(); if (!seg_table) goto cleanup; #endif //FEATURE_BASICFREEZE segment_standby_list = 0; if (!full_gc_approach_event.CreateManualEventNoThrow(FALSE)) { goto cleanup; } if (!full_gc_end_event.CreateManualEventNoThrow(FALSE)) { goto cleanup; } fgn_loh_percent = 0; full_gc_approach_event_set = false; memset (full_gc_counts, 0, sizeof (full_gc_counts)); memset (&last_ephemeral_gc_info, 0, sizeof (last_ephemeral_gc_info)); memset (&last_full_blocking_gc_info, 0, sizeof (last_full_blocking_gc_info)); #ifdef BACKGROUND_GC memset (&last_bgc_info, 0, sizeof (last_bgc_info)); #endif //BACKGROUND_GC should_expand_in_full_gc = FALSE; #ifdef FEATURE_LOH_COMPACTION loh_compaction_always_p = GCConfig::GetLOHCompactionMode() != 0; loh_compaction_mode = loh_compaction_default; #endif //FEATURE_LOH_COMPACTION loh_size_threshold = (size_t)GCConfig::GetLOHThreshold(); assert (loh_size_threshold >= LARGE_OBJECT_SIZE); #ifdef BGC_SERVO_TUNING memset (bgc_tuning::gen_calc, 0, sizeof (bgc_tuning::gen_calc)); memset (bgc_tuning::gen_stats, 0, sizeof (bgc_tuning::gen_stats)); memset (bgc_tuning::current_bgc_end_data, 0, sizeof (bgc_tuning::current_bgc_end_data)); // for the outer loop - the ML (memory load) loop bgc_tuning::enable_fl_tuning = (GCConfig::GetBGCFLTuningEnabled() != 0); bgc_tuning::memory_load_goal = (uint32_t)GCConfig::GetBGCMemGoal(); bgc_tuning::memory_load_goal_slack = (uint32_t)GCConfig::GetBGCMemGoalSlack(); bgc_tuning::ml_kp = (double)GCConfig::GetBGCMLkp() / 1000.0; bgc_tuning::ml_ki = (double)GCConfig::GetBGCMLki() / 1000.0; bgc_tuning::ratio_correction_step = (double)GCConfig::GetBGCG2RatioStep() / 100.0; // for the inner loop - the alloc loop which calculates the allocated 
bytes in gen2 before // triggering the next BGC. bgc_tuning::above_goal_kp = (double)GCConfig::GetBGCFLkp() / 1000000.0; bgc_tuning::enable_ki = (GCConfig::GetBGCFLEnableKi() != 0); bgc_tuning::above_goal_ki = (double)GCConfig::GetBGCFLki() / 1000000.0; bgc_tuning::enable_kd = (GCConfig::GetBGCFLEnableKd() != 0); bgc_tuning::above_goal_kd = (double)GCConfig::GetBGCFLkd() / 100.0; bgc_tuning::enable_smooth = (GCConfig::GetBGCFLEnableSmooth() != 0); bgc_tuning::num_gen1s_smooth_factor = (double)GCConfig::GetBGCFLSmoothFactor() / 100.0; bgc_tuning::enable_tbh = (GCConfig::GetBGCFLEnableTBH() != 0); bgc_tuning::enable_ff = (GCConfig::GetBGCFLEnableFF() != 0); bgc_tuning::above_goal_ff = (double)GCConfig::GetBGCFLff() / 100.0; bgc_tuning::enable_gradual_d = (GCConfig::GetBGCFLGradualD() != 0); sweep_flr_goal = (uint32_t)GCConfig::GetBGCFLSweepGoal(); sweep_flr_goal_loh = (uint32_t)GCConfig::GetBGCFLSweepGoalLOH(); bgc_tuning::gen_calc[0].sweep_flr_goal = ((sweep_flr_goal == 0) ? 20.0 : (double)sweep_flr_goal); bgc_tuning::gen_calc[1].sweep_flr_goal = ((sweep_flr_goal_loh == 0) ? 20.0 : (double)sweep_flr_goal_loh); bgc_tuning::available_memory_goal = (uint64_t)((double)gc_heap::total_physical_mem * (double)(100 - bgc_tuning::memory_load_goal) / 100); get_memory_info (&current_memory_load); dprintf (BGC_TUNING_LOG, ("BTL tuning %s!!!", (bgc_tuning::enable_fl_tuning ? "enabled" : "disabled"))); #ifdef SIMPLE_DPRINTF dprintf (BGC_TUNING_LOG, ("BTL tuning parameters: mem goal: %d%%(%I64d), +/-%d%%, gen2 correction factor: %.2f, sweep flr goal: %d%%, smooth factor: %.3f(%s), TBH: %s, FF: %.3f(%s), ml: kp %.5f, ki %.10f", bgc_tuning::memory_load_goal, bgc_tuning::available_memory_goal, bgc_tuning::memory_load_goal_slack, bgc_tuning::ratio_correction_step, (int)bgc_tuning::gen_calc[0].sweep_flr_goal, bgc_tuning::num_gen1s_smooth_factor, (bgc_tuning::enable_smooth ? "enabled" : "disabled"), (bgc_tuning::enable_tbh ? "enabled" : "disabled"), bgc_tuning::above_goal_ff, (bgc_tuning::enable_ff ? "enabled" : "disabled"), bgc_tuning::ml_kp, bgc_tuning::ml_ki)); dprintf (BGC_TUNING_LOG, ("BTL tuning parameters: kp: %.5f, ki: %.5f (%s), kd: %.3f (kd-%s, gd-%s), ff: %.3f", bgc_tuning::above_goal_kp, bgc_tuning::above_goal_ki, (bgc_tuning::enable_ki ? "enabled" : "disabled"), bgc_tuning::above_goal_kd, (bgc_tuning::enable_kd ? "enabled" : "disabled"), (bgc_tuning::enable_gradual_d ? 
"enabled" : "disabled"), bgc_tuning::above_goal_ff)); #endif //SIMPLE_DPRINTF if (bgc_tuning::enable_fl_tuning && (current_memory_load < bgc_tuning::memory_load_goal)) { uint32_t distance_to_goal = bgc_tuning::memory_load_goal - current_memory_load; bgc_tuning::stepping_interval = max (distance_to_goal / 10, 1); bgc_tuning::last_stepping_mem_load = current_memory_load; bgc_tuning::last_stepping_bgc_count = 0; dprintf (BGC_TUNING_LOG, ("current ml: %d, %d to goal, interval: %d", current_memory_load, distance_to_goal, bgc_tuning::stepping_interval)); } else { dprintf (BGC_TUNING_LOG, ("current ml: %d, >= goal: %d, disable stepping", current_memory_load, bgc_tuning::memory_load_goal)); bgc_tuning::use_stepping_trigger_p = false; } #endif //BGC_SERVO_TUNING #ifdef BACKGROUND_GC memset (ephemeral_fgc_counts, 0, sizeof (ephemeral_fgc_counts)); bgc_alloc_spin_count = static_cast<uint32_t>(GCConfig::GetBGCSpinCount()); bgc_alloc_spin = static_cast<uint32_t>(GCConfig::GetBGCSpin()); { int number_bgc_threads = get_num_heaps(); if (!create_bgc_threads_support (number_bgc_threads)) { goto cleanup; } } #endif //BACKGROUND_GC memset (&current_no_gc_region_info, 0, sizeof (current_no_gc_region_info)); #ifdef GC_CONFIG_DRIVEN compact_or_sweep_gcs[0] = 0; compact_or_sweep_gcs[1] = 0; #endif //GC_CONFIG_DRIVEN #ifdef SHORT_PLUGS short_plugs_pad_ratio = (double)DESIRED_PLUG_LENGTH / (double)(DESIRED_PLUG_LENGTH - Align (min_obj_size)); #endif //SHORT_PLUGS generation_skip_ratio_threshold = (int)GCConfig::GetGCLowSkipRatio(); #ifdef FEATURE_EVENT_TRACE gc_time_info = new (nothrow) uint64_t[max_compact_time_type]; if (!gc_time_info) { goto cleanup; } #ifdef BACKGROUND_GC bgc_time_info = new (nothrow) uint64_t[max_bgc_time_type]; if (!bgc_time_info) { goto cleanup; } #endif //BACKGROUND_GC #ifdef FEATURE_LOH_COMPACTION loh_compact_info = new (nothrow) etw_loh_compact_info [get_num_heaps()]; if (!loh_compact_info) { goto cleanup; } #endif //FEATURE_LOH_COMPACTION #endif //FEATURE_EVENT_TRACE conserve_mem_setting = (int)GCConfig::GetGCConserveMem(); if (conserve_mem_setting < 0) conserve_mem_setting = 0; if (conserve_mem_setting > 9) conserve_mem_setting = 9; dprintf (1, ("conserve_mem_setting = %d", conserve_mem_setting)); ret = 1; cleanup: if (!ret) { if (full_gc_approach_event.IsValid()) { full_gc_approach_event.CloseEvent(); } if (full_gc_end_event.IsValid()) { full_gc_end_event.CloseEvent(); } } return ret; } gc_heap* gc_heap::make_gc_heap ( #ifdef MULTIPLE_HEAPS GCHeap* vm_hp, int heap_number #endif //MULTIPLE_HEAPS ) { gc_heap* res = 0; #ifdef MULTIPLE_HEAPS res = new (nothrow) gc_heap; if (!res) return 0; res->vm_heap = vm_hp; res->alloc_context_count = 0; #ifndef USE_REGIONS res->mark_list_piece_start = new (nothrow) uint8_t**[n_heaps]; if (!res->mark_list_piece_start) return 0; #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable:22011) // Suppress PREFast warning about integer underflow/overflow #endif // _PREFAST_ res->mark_list_piece_end = new (nothrow) uint8_t**[n_heaps + 32]; // +32 is padding to reduce false sharing #ifdef _PREFAST_ #pragma warning(pop) #endif // _PREFAST_ if (!res->mark_list_piece_end) return 0; #endif //!USE_REGIONS #endif //MULTIPLE_HEAPS if (res->init_gc_heap ( #ifdef MULTIPLE_HEAPS heap_number #else //MULTIPLE_HEAPS 0 #endif //MULTIPLE_HEAPS )==0) { return 0; } #ifdef MULTIPLE_HEAPS return res; #else return (gc_heap*)1; #endif //MULTIPLE_HEAPS } uint32_t gc_heap::wait_for_gc_done(int32_t timeOut) { bool cooperative_mode = enable_preemptive (); uint32_t dwWaitResult 
= NOERROR; gc_heap* wait_heap = NULL; while (gc_heap::gc_started) { #ifdef MULTIPLE_HEAPS wait_heap = GCHeap::GetHeap(heap_select::select_heap(NULL))->pGenGCHeap; dprintf(2, ("waiting for the gc_done_event on heap %d", wait_heap->heap_number)); #endif // MULTIPLE_HEAPS #ifdef _PREFAST_ PREFIX_ASSUME(wait_heap != NULL); #endif // _PREFAST_ dwWaitResult = wait_heap->gc_done_event.Wait(timeOut, FALSE); } disable_preemptive (cooperative_mode); return dwWaitResult; } void gc_heap::set_gc_done() { enter_gc_done_event_lock(); if (!gc_done_event_set) { gc_done_event_set = true; dprintf (2, ("heap %d: setting gc_done_event", heap_number)); gc_done_event.Set(); } exit_gc_done_event_lock(); } void gc_heap::reset_gc_done() { enter_gc_done_event_lock(); if (gc_done_event_set) { gc_done_event_set = false; dprintf (2, ("heap %d: resetting gc_done_event", heap_number)); gc_done_event.Reset(); } exit_gc_done_event_lock(); } void gc_heap::enter_gc_done_event_lock() { uint32_t dwSwitchCount = 0; retry: if (Interlocked::CompareExchange(&gc_done_event_lock, 0, -1) >= 0) { while (gc_done_event_lock >= 0) { if (g_num_processors > 1) { int spin_count = yp_spin_count_unit; for (int j = 0; j < spin_count; j++) { if (gc_done_event_lock < 0) break; YieldProcessor(); // indicate to the processor that we are spinning } if (gc_done_event_lock >= 0) GCToOSInterface::YieldThread(++dwSwitchCount); } else GCToOSInterface::YieldThread(++dwSwitchCount); } goto retry; } } void gc_heap::exit_gc_done_event_lock() { gc_done_event_lock = -1; } #ifndef MULTIPLE_HEAPS #ifdef RECORD_LOH_STATE int gc_heap::loh_state_index = 0; gc_heap::loh_state_info gc_heap::last_loh_states[max_saved_loh_states]; #endif //RECORD_LOH_STATE VOLATILE(int32_t) gc_heap::gc_done_event_lock; VOLATILE(bool) gc_heap::gc_done_event_set; GCEvent gc_heap::gc_done_event; #endif //!MULTIPLE_HEAPS VOLATILE(bool) gc_heap::internal_gc_done; void gc_heap::add_saved_spinlock_info ( bool loh_p, msl_enter_state enter_state, msl_take_state take_state) { #ifdef SPINLOCK_HISTORY spinlock_info* current = &last_spinlock_info[spinlock_info_index]; current->enter_state = enter_state; current->take_state = take_state; current->thread_id.SetToCurrentThread(); current->loh_p = loh_p; dprintf (SPINLOCK_LOG, ("[%d]%s %s %s", heap_number, (loh_p ? "loh" : "soh"), ((enter_state == me_acquire) ? "E" : "L"), msl_take_state_str[take_state])); spinlock_info_index++; assert (spinlock_info_index <= max_saved_spinlock_info); if (spinlock_info_index >= max_saved_spinlock_info) { spinlock_info_index = 0; } #else UNREFERENCED_PARAMETER(enter_state); UNREFERENCED_PARAMETER(take_state); #endif //SPINLOCK_HISTORY } int gc_heap::init_gc_heap (int h_number) { #ifdef MULTIPLE_HEAPS time_bgc_last = 0; for (int oh_index = 0; oh_index < (gc_oh_num::total_oh_count - 1); oh_index++) allocated_since_last_gc[oh_index] = 0; #ifdef SPINLOCK_HISTORY spinlock_info_index = 0; memset (last_spinlock_info, 0, sizeof(last_spinlock_info)); #endif //SPINLOCK_HISTORY // initialize per heap members. 
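// (this block only runs in the multi-heap build, where every gc_heap instance owns its own
// copy of these fields and resets them here; in the single-heap build these members are
// statics, so no per-instance reset is needed)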
#ifndef USE_REGIONS ephemeral_low = (uint8_t*)1; ephemeral_high = MAX_PTR; #endif //!USE_REGIONS gc_low = 0; gc_high = 0; ephemeral_heap_segment = 0; oomhist_index_per_heap = 0; freeable_uoh_segment = 0; condemned_generation_num = 0; blocking_collection = FALSE; generation_skip_ratio = 100; #ifdef FEATURE_CARD_MARKING_STEALING n_eph_soh = 0; n_gen_soh = 0; n_eph_loh = 0; n_gen_loh = 0; #endif //FEATURE_CARD_MARKING_STEALING mark_stack_tos = 0; mark_stack_bos = 0; mark_stack_array_length = 0; mark_stack_array = 0; #if defined (_DEBUG) && defined (VERIFY_HEAP) verify_pinned_queue_p = FALSE; #endif // _DEBUG && VERIFY_HEAP #ifdef FEATURE_LOH_COMPACTION loh_pinned_queue_tos = 0; loh_pinned_queue_bos = 0; loh_pinned_queue_length = 0; loh_pinned_queue_decay = LOH_PIN_DECAY; loh_pinned_queue = 0; #endif //FEATURE_LOH_COMPACTION min_overflow_address = MAX_PTR; max_overflow_address = 0; gen0_bricks_cleared = FALSE; gen0_must_clear_bricks = 0; allocation_quantum = CLR_SIZE; more_space_lock_soh = gc_lock; more_space_lock_uoh = gc_lock; ro_segments_in_range = FALSE; loh_alloc_since_cg = 0; new_heap_segment = NULL; gen0_allocated_after_gc_p = false; #ifdef RECORD_LOH_STATE loh_state_index = 0; #endif //RECORD_LOH_STATE #endif //MULTIPLE_HEAPS #ifdef MULTIPLE_HEAPS if (h_number > n_heaps) { assert (!"Number of heaps exceeded"); return 0; } heap_number = h_number; #endif //MULTIPLE_HEAPS memset (&oom_info, 0, sizeof (oom_info)); memset (&fgm_result, 0, sizeof (fgm_result)); memset (oomhist_per_heap, 0, sizeof (oomhist_per_heap)); if (!gc_done_event.CreateManualEventNoThrow(FALSE)) { return 0; } gc_done_event_lock = -1; gc_done_event_set = false; if (!init_dynamic_data()) { return 0; } uint32_t* ct = &g_gc_card_table [card_word (card_of (g_gc_lowest_address))]; own_card_table (ct); card_table = translate_card_table (ct); brick_table = card_table_brick_table (ct); highest_address = card_table_highest_address (ct); lowest_address = card_table_lowest_address (ct); #ifdef CARD_BUNDLE card_bundle_table = translate_card_bundle_table (card_table_card_bundle_table (ct), g_gc_lowest_address); assert (&card_bundle_table [card_bundle_word (cardw_card_bundle (card_word (card_of (g_gc_lowest_address))))] == card_table_card_bundle_table (ct)); #endif //CARD_BUNDLE #ifdef BACKGROUND_GC if (gc_can_use_concurrent) mark_array = translate_mark_array (card_table_mark_array (&g_gc_card_table[card_word (card_of (g_gc_lowest_address))])); else mark_array = NULL; #endif //BACKGROUND_GC #ifdef USE_REGIONS #ifdef STRESS_REGIONS // Handle table APIs expect coop so we temporarily switch to coop. disable_preemptive (true); pinning_handles_for_alloc = new (nothrow) (OBJECTHANDLE[PINNING_HANDLE_INITIAL_LENGTH]); for (int i = 0; i < PINNING_HANDLE_INITIAL_LENGTH; i++) { pinning_handles_for_alloc[i] = g_gcGlobalHandleStore->CreateHandleOfType (0, HNDTYPE_PINNED); } enable_preemptive(); ph_index_per_heap = 0; pinning_seg_interval = 2; num_gen0_regions = 0; sip_seg_interval = 2; sip_seg_maxgen_interval = 3; num_condemned_regions = 0; #endif //STRESS_REGIONS end_gen0_region_space = 0; gen0_pinned_free_space = 0; gen0_large_chunk_found = false; // REGIONS PERF TODO: we should really allocate the POH regions together just so that // they wouldn't prevent us from coalescing free regions to form a large virtual address // range. 
if (!initial_make_soh_regions (__this) || !initial_make_uoh_regions (loh_generation, __this) || !initial_make_uoh_regions (poh_generation, __this)) { return 0; } #else //USE_REGIONS heap_segment* seg = make_initial_segment (soh_gen0, h_number, __this); if (!seg) return 0; FIRE_EVENT(GCCreateSegment_V1, heap_segment_mem(seg), (size_t)(heap_segment_reserved (seg) - heap_segment_mem(seg)), gc_etw_segment_small_object_heap); seg_mapping_table_add_segment (seg, __this); #ifdef MULTIPLE_HEAPS assert (heap_segment_heap (seg) == __this); #endif //MULTIPLE_HEAPS uint8_t* start = heap_segment_mem (seg); for (int i = max_generation; i >= 0; i--) { make_generation (i, seg, start); start += Align (min_obj_size); } heap_segment_allocated (seg) = start; alloc_allocated = start; heap_segment_used (seg) = start - plug_skew; ephemeral_heap_segment = seg; // Create segments for the large and pinned generations heap_segment* lseg = make_initial_segment(loh_generation, h_number, __this); if (!lseg) return 0; lseg->flags |= heap_segment_flags_loh; FIRE_EVENT(GCCreateSegment_V1, heap_segment_mem(lseg), (size_t)(heap_segment_reserved (lseg) - heap_segment_mem(lseg)), gc_etw_segment_large_object_heap); heap_segment* pseg = make_initial_segment (poh_generation, h_number, __this); if (!pseg) return 0; pseg->flags |= heap_segment_flags_poh; FIRE_EVENT(GCCreateSegment_V1, heap_segment_mem(pseg), (size_t)(heap_segment_reserved (pseg) - heap_segment_mem(pseg)), gc_etw_segment_pinned_object_heap); seg_mapping_table_add_segment (lseg, __this); seg_mapping_table_add_segment (pseg, __this); make_generation (loh_generation, lseg, heap_segment_mem (lseg)); make_generation (poh_generation, pseg, heap_segment_mem (pseg)); heap_segment_allocated (lseg) = heap_segment_mem (lseg) + Align (min_obj_size, get_alignment_constant (FALSE)); heap_segment_used (lseg) = heap_segment_allocated (lseg) - plug_skew; heap_segment_allocated (pseg) = heap_segment_mem (pseg) + Align (min_obj_size, get_alignment_constant (FALSE)); heap_segment_used (pseg) = heap_segment_allocated (pseg) - plug_skew; for (int gen_num = 0; gen_num < total_generation_count; gen_num++) { generation* gen = generation_of (gen_num); make_unused_array (generation_allocation_start (gen), Align (min_obj_size)); } #ifdef MULTIPLE_HEAPS assert (heap_segment_heap (lseg) == __this); assert (heap_segment_heap (pseg) == __this); #endif //MULTIPLE_HEAPS #endif //USE_REGIONS #ifdef MULTIPLE_HEAPS //initialize the alloc context heap generation_alloc_context (generation_of (soh_gen0))->set_alloc_heap(vm_heap); generation_alloc_context (generation_of (loh_generation))->set_alloc_heap(vm_heap); generation_alloc_context (generation_of (poh_generation))->set_alloc_heap(vm_heap); #endif //MULTIPLE_HEAPS generation_of (max_generation)->free_list_allocator = allocator(NUM_GEN2_ALIST, BASE_GEN2_ALIST_BITS, gen2_alloc_list, max_generation); generation_of (loh_generation)->free_list_allocator = allocator(NUM_LOH_ALIST, BASE_LOH_ALIST_BITS, loh_alloc_list); generation_of (poh_generation)->free_list_allocator = allocator(NUM_POH_ALIST, BASE_POH_ALIST_BITS, poh_alloc_list); for (int oh_index = 0; oh_index < (gc_oh_num::total_oh_count - 1); oh_index++) etw_allocation_running_amount[oh_index] = 0; total_alloc_bytes_soh = 0; total_alloc_bytes_uoh = 0; //needs to be done after the dynamic data has been initialized #ifndef MULTIPLE_HEAPS allocation_running_amount = dd_min_size (dynamic_data_of (0)); #endif //!MULTIPLE_HEAPS fgn_maxgen_percent = 0; fgn_last_alloc = dd_min_size (dynamic_data_of (0)); 
mark* arr = new (nothrow) (mark [MARK_STACK_INITIAL_LENGTH]); if (!arr) return 0; make_mark_stack(arr); #ifdef BACKGROUND_GC #ifdef BGC_SERVO_TUNING loh_a_no_bgc = 0; loh_a_bgc_marking = 0; loh_a_bgc_planning = 0; bgc_maxgen_end_fl_size = 0; #endif //BGC_SERVO_TUNING freeable_soh_segment = 0; gchist_index_per_heap = 0; if (gc_can_use_concurrent) { uint8_t** b_arr = new (nothrow) (uint8_t * [MARK_STACK_INITIAL_LENGTH]); if (!b_arr) return 0; make_background_mark_stack(b_arr); } #endif //BACKGROUND_GC #ifndef USE_REGIONS ephemeral_low = generation_allocation_start(generation_of(max_generation - 1)); ephemeral_high = heap_segment_reserved(ephemeral_heap_segment); #endif //!USE_REGIONS if (heap_number == 0) { stomp_write_barrier_initialize( #if defined(MULTIPLE_HEAPS) || defined(USE_REGIONS) reinterpret_cast<uint8_t*>(1), reinterpret_cast<uint8_t*>(~0) #else ephemeral_low, ephemeral_high #endif //!MULTIPLE_HEAPS || USE_REGIONS ); } #ifdef MULTIPLE_HEAPS if (!create_gc_thread ()) return 0; g_heaps [heap_number] = this; #endif //MULTIPLE_HEAPS #ifdef FEATURE_PREMORTEM_FINALIZATION HRESULT hr = AllocateCFinalize(&finalize_queue); if (FAILED(hr)) return 0; #endif // FEATURE_PREMORTEM_FINALIZATION max_free_space_items = MAX_NUM_FREE_SPACES; bestfit_seg = new (nothrow) seg_free_spaces (heap_number); if (!bestfit_seg) { return 0; } if (!bestfit_seg->alloc()) { return 0; } last_gc_before_oom = FALSE; sufficient_gen0_space_p = FALSE; #ifdef MULTIPLE_HEAPS #ifdef HEAP_ANALYZE heap_analyze_success = TRUE; internal_root_array = 0; internal_root_array_index = 0; internal_root_array_length = initial_internal_roots; current_obj = 0; current_obj_size = 0; #endif //HEAP_ANALYZE #endif // MULTIPLE_HEAPS #ifdef BACKGROUND_GC bgc_thread_id.Clear(); if (!create_bgc_thread_support()) { return 0; } bgc_alloc_lock = new (nothrow) exclusive_sync; if (!bgc_alloc_lock) { return 0; } bgc_alloc_lock->init(); bgc_thread_running = 0; bgc_thread = 0; bgc_threads_timeout_cs.Initialize(); current_bgc_state = bgc_not_in_process; background_soh_alloc_count = 0; background_uoh_alloc_count = 0; bgc_overflow_count = 0; end_loh_size = dd_min_size (dynamic_data_of (loh_generation)); end_poh_size = dd_min_size (dynamic_data_of (poh_generation)); current_sweep_pos = 0; #ifdef DOUBLY_LINKED_FL current_sweep_seg = 0; #endif //DOUBLY_LINKED_FL #endif //BACKGROUND_GC #ifdef GC_CONFIG_DRIVEN memset(interesting_data_per_heap, 0, sizeof (interesting_data_per_heap)); memset(compact_reasons_per_heap, 0, sizeof (compact_reasons_per_heap)); memset(expand_mechanisms_per_heap, 0, sizeof (expand_mechanisms_per_heap)); memset(interesting_mechanism_bits_per_heap, 0, sizeof (interesting_mechanism_bits_per_heap)); #endif //GC_CONFIG_DRIVEN return 1; } void gc_heap::destroy_semi_shared() { //TODO: will need to move this to per heap //#ifdef BACKGROUND_GC // if (c_mark_list) // delete c_mark_list; //#endif //BACKGROUND_GC if (g_mark_list) delete g_mark_list; if (seg_mapping_table) delete seg_mapping_table; #ifdef FEATURE_BASICFREEZE //destroy the segment map seg_table->delete_sorted_table(); #endif //FEATURE_BASICFREEZE } void gc_heap::self_destroy() { #ifdef BACKGROUND_GC kill_gc_thread(); #endif //BACKGROUND_GC if (gc_done_event.IsValid()) { gc_done_event.CloseEvent(); } // destroy every segment for (int i = get_start_generation_index(); i < total_generation_count; i++) { heap_segment* seg = heap_segment_rw (generation_start_segment (generation_of (i))); PREFIX_ASSUME(seg != NULL); while (seg) { heap_segment* next_seg = heap_segment_next_rw (seg); 
delete_heap_segment (seg); seg = next_seg; } } // get rid of the card table release_card_table (card_table); // destroy the mark stack delete mark_stack_array; #ifdef FEATURE_PREMORTEM_FINALIZATION if (finalize_queue) delete finalize_queue; #endif // FEATURE_PREMORTEM_FINALIZATION } void gc_heap::destroy_gc_heap(gc_heap* heap) { heap->self_destroy(); delete heap; } // Destroys resources owned by gc. It is assumed that a last GC has been performed and that // the finalizer queue has been drained. void gc_heap::shutdown_gc() { destroy_semi_shared(); #ifdef MULTIPLE_HEAPS //delete the heaps array delete g_heaps; destroy_thread_support(); n_heaps = 0; #endif //MULTIPLE_HEAPS //destroy seg_manager destroy_initial_memory(); GCToOSInterface::Shutdown(); } inline BOOL gc_heap::size_fit_p (size_t size REQD_ALIGN_AND_OFFSET_DCL, uint8_t* alloc_pointer, uint8_t* alloc_limit, uint8_t* old_loc, int use_padding) { BOOL already_padded = FALSE; #ifdef SHORT_PLUGS if ((old_loc != 0) && (use_padding & USE_PADDING_FRONT)) { alloc_pointer = alloc_pointer + Align (min_obj_size); already_padded = TRUE; } #endif //SHORT_PLUGS if (!((old_loc == 0) || same_large_alignment_p (old_loc, alloc_pointer))) size = size + switch_alignment_size (already_padded); #ifdef FEATURE_STRUCTALIGN alloc_pointer = StructAlign(alloc_pointer, requiredAlignment, alignmentOffset); #endif // FEATURE_STRUCTALIGN // in allocate_in_condemned_generation we can have this when we // set the alloc_limit to plan_allocated which could be less than // alloc_ptr if (alloc_limit < alloc_pointer) { return FALSE; } if (old_loc != 0) { return (((size_t)(alloc_limit - alloc_pointer) >= (size + ((use_padding & USE_PADDING_TAIL)? Align(min_obj_size) : 0))) #ifdef SHORT_PLUGS ||((!(use_padding & USE_PADDING_FRONT)) && ((alloc_pointer + size) == alloc_limit)) #else //SHORT_PLUGS ||((alloc_pointer + size) == alloc_limit) #endif //SHORT_PLUGS ); } else { assert (size == Align (min_obj_size)); return ((size_t)(alloc_limit - alloc_pointer) >= size); } } inline BOOL gc_heap::a_size_fit_p (size_t size, uint8_t* alloc_pointer, uint8_t* alloc_limit, int align_const) { // We could have run into cases where this is true when alloc_allocated is the // the same as the seg committed. if (alloc_limit < alloc_pointer) { return FALSE; } return ((size_t)(alloc_limit - alloc_pointer) >= (size + Align(min_obj_size, align_const))); } // Grow by committing more pages BOOL gc_heap::grow_heap_segment (heap_segment* seg, uint8_t* high_address, bool* hard_limit_exceeded_p) { assert (high_address <= heap_segment_reserved (seg)); if (hard_limit_exceeded_p) *hard_limit_exceeded_p = false; //return 0 if we are at the end of the segment. 
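// Otherwise commit just enough pages to cover high_address, but at least commit_min_th bytes
// and never more than what is still reserved in this segment (for illustration: if only one
// page is needed and commit_min_th is larger, the commit size is rounded up to commit_min_th).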
if (align_on_page (high_address) > heap_segment_reserved (seg)) return FALSE; if (high_address <= heap_segment_committed (seg)) return TRUE; size_t c_size = align_on_page ((size_t)(high_address - heap_segment_committed (seg))); c_size = max (c_size, commit_min_th); c_size = min (c_size, (size_t)(heap_segment_reserved (seg) - heap_segment_committed (seg))); if (c_size == 0) return FALSE; STRESS_LOG2(LF_GC, LL_INFO10000, "Growing heap_segment: %Ix high address: %Ix\n", (size_t)seg, (size_t)high_address); bool ret = virtual_commit (heap_segment_committed (seg), c_size, heap_segment_oh (seg), heap_number, hard_limit_exceeded_p); if (ret) { heap_segment_committed (seg) += c_size; STRESS_LOG1(LF_GC, LL_INFO10000, "New commit: %Ix\n", (size_t)heap_segment_committed (seg)); assert (heap_segment_committed (seg) <= heap_segment_reserved (seg)); assert (high_address <= heap_segment_committed (seg)); #if defined(MULTIPLE_HEAPS) && !defined(USE_REGIONS) // we should never increase committed beyond decommit target when gradual // decommit is in progress - if we do, this means commit and decommit are // going on at the same time. assert (!gradual_decommit_in_progress_p || (seg != ephemeral_heap_segment) || (heap_segment_committed (seg) <= heap_segment_decommit_target (seg))); #endif //MULTIPLE_HEAPS && !USE_REGIONS } return !!ret; } inline int gc_heap::grow_heap_segment (heap_segment* seg, uint8_t* allocated, uint8_t* old_loc, size_t size, BOOL pad_front_p REQD_ALIGN_AND_OFFSET_DCL) { BOOL already_padded = FALSE; #ifdef SHORT_PLUGS if ((old_loc != 0) && pad_front_p) { allocated = allocated + Align (min_obj_size); already_padded = TRUE; } #endif //SHORT_PLUGS if (!((old_loc == 0) || same_large_alignment_p (old_loc, allocated))) size += switch_alignment_size (already_padded); #ifdef FEATURE_STRUCTALIGN size_t pad = ComputeStructAlignPad(allocated, requiredAlignment, alignmentOffset); return grow_heap_segment (seg, allocated + pad + size); #else // FEATURE_STRUCTALIGN return grow_heap_segment (seg, allocated + size); #endif // FEATURE_STRUCTALIGN } // thread this object to the front of gen's free list and update stats. void gc_heap::thread_free_item_front (generation* gen, uint8_t* free_start, size_t free_size) { make_unused_array (free_start, free_size); generation_free_list_space (gen) += free_size; generation_allocator(gen)->thread_item_front (free_start, free_size); add_gen_free (gen->gen_num, free_size); if (gen->gen_num == max_generation) { dprintf (2, ("AO h%d: gen2F+: %Ix(%Id)->%Id, FO: %Id", heap_number, free_start, free_size, generation_free_list_space (gen), generation_free_obj_space (gen))); } } #ifdef DOUBLY_LINKED_FL void gc_heap::thread_item_front_added (generation* gen, uint8_t* free_start, size_t free_size) { make_unused_array (free_start, free_size); generation_free_list_space (gen) += free_size; int bucket_index = generation_allocator(gen)->thread_item_front_added (free_start, free_size); if (gen->gen_num == max_generation) { dprintf (2, ("AO [h%d] gen2FL+: %Ix(%Id)->%Id", heap_number, free_start, free_size, generation_free_list_space (gen))); } add_gen_free (gen->gen_num, free_size); } #endif //DOUBLY_LINKED_FL // this is for free objects that are not on the free list; also update stats. 
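// Such objects are only accounted for in generation_free_obj_space and are not threaded onto
// the allocator's free list, so that space cannot be used to satisfy further allocations.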
void gc_heap::make_free_obj (generation* gen, uint8_t* free_start, size_t free_size) { make_unused_array (free_start, free_size); generation_free_obj_space (gen) += free_size; if (gen->gen_num == max_generation) { dprintf (2, ("AO [h%d] gen2FO+: %Ix(%Id)->%Id", heap_number, free_start, free_size, generation_free_obj_space (gen))); } } //used only in older generation allocation (i.e during gc). void gc_heap::adjust_limit (uint8_t* start, size_t limit_size, generation* gen) { dprintf (3, ("gc Expanding segment allocation")); heap_segment* seg = generation_allocation_segment (gen); if ((generation_allocation_limit (gen) != start) || (start != heap_segment_plan_allocated (seg))) { if (generation_allocation_limit (gen) == heap_segment_plan_allocated (seg)) { assert (generation_allocation_pointer (gen) >= heap_segment_mem (seg)); assert (generation_allocation_pointer (gen) <= heap_segment_committed (seg)); heap_segment_plan_allocated (generation_allocation_segment (gen)) = generation_allocation_pointer (gen); } else { uint8_t* hole = generation_allocation_pointer (gen); size_t size = (generation_allocation_limit (gen) - generation_allocation_pointer (gen)); if (size != 0) { dprintf (3, ("filling up hole: %Ix, size %Ix", hole, size)); size_t allocated_size = generation_allocation_pointer (gen) - generation_allocation_context_start_region (gen); #ifdef DOUBLY_LINKED_FL if (gen->gen_num == max_generation) { // For BGC since we need to thread the max_gen's free list as a doubly linked list we need to // preserve 5 ptr-sized words: SB | MT | Len | Next | Prev // This means we cannot simply make a filler free object right after what's allocated in this // alloc context if that's < 5-ptr sized. // if (allocated_size <= min_free_item_no_prev) { // We can't make the free object just yet. Need to record the size. 
size_t* filler_free_obj_size_location = (size_t*)(generation_allocation_context_start_region (gen) + min_free_item_no_prev); size_t filler_free_obj_size = 0; if (size >= (Align (min_free_list) + Align (min_obj_size))) { filler_free_obj_size = Align (min_obj_size); size_t fl_size = size - filler_free_obj_size; thread_item_front_added (gen, (hole + filler_free_obj_size), fl_size); } else { filler_free_obj_size = size; } generation_free_obj_space (gen) += filler_free_obj_size; *filler_free_obj_size_location = filler_free_obj_size; uint8_t* old_loc = generation_last_free_list_allocated (gen); // check if old_loc happens to be in a saved plug_and_gap with a pinned plug after it uint8_t* saved_plug_and_gap = nullptr; if (saved_pinned_plug_index != INVALID_SAVED_PINNED_PLUG_INDEX) { saved_plug_and_gap = pinned_plug (pinned_plug_of (saved_pinned_plug_index)) - sizeof(plug_and_gap); dprintf (3333, ("[h%d] sppi: %Id mtos: %Id old_loc: %Ix pp: %Ix(%Id) offs: %Id", heap_number, saved_pinned_plug_index, mark_stack_tos, old_loc, pinned_plug (pinned_plug_of (saved_pinned_plug_index)), pinned_len (pinned_plug_of (saved_pinned_plug_index)), old_loc - saved_plug_and_gap)); } size_t offset = old_loc - saved_plug_and_gap; if (offset < sizeof(gap_reloc_pair)) { // the object at old_loc must be at least min_obj_size assert (offset <= sizeof(plug_and_gap) - min_obj_size); // if so, set the bit in the saved info instead set_free_obj_in_compact_bit ((uint8_t*)(&pinned_plug_of (saved_pinned_plug_index)->saved_pre_plug_reloc) + offset); } else { #ifdef _DEBUG // check this looks like an object header(old_loc)->Validate(); #endif //_DEBUG set_free_obj_in_compact_bit (old_loc); } dprintf (3333, ("[h%d] ac: %Ix->%Ix((%Id < %Id), Pset %Ix s->%Id", heap_number, generation_allocation_context_start_region (gen), generation_allocation_pointer (gen), allocated_size, min_free_item_no_prev, filler_free_obj_size_location, filler_free_obj_size)); } else { if (size >= Align (min_free_list)) { thread_item_front_added (gen, hole, size); } else { make_free_obj (gen, hole, size); } } } else #endif //DOUBLY_LINKED_FL { // TODO: this should be written the same way as the above, ie, it should check // allocated_size first, but it doesn't need to do MAKE_FREE_OBJ_IN_COMPACT // related things. 
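// Holes of at least min_free_list bytes go back on the free list (possibly after carving off
// a min-sized filler object when the preceding allocation was too short); smaller holes simply
// become free objects that are only counted in free_obj_space.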
if (size >= Align (min_free_list)) { if (allocated_size < min_free_item_no_prev) { if (size >= (Align (min_free_list) + Align (min_obj_size))) { //split hole into min obj + threadable free item make_free_obj (gen, hole, min_obj_size); thread_free_item_front (gen, (hole + Align (min_obj_size)), (size - Align (min_obj_size))); } else { dprintf (3, ("allocated size too small, can't put back rest on free list %Ix", allocated_size)); make_free_obj (gen, hole, size); } } else { dprintf (3, ("threading hole in front of free list")); thread_free_item_front (gen, hole, size); } } else { make_free_obj (gen, hole, size); } } } } generation_allocation_pointer (gen) = start; generation_allocation_context_start_region (gen) = start; } generation_allocation_limit (gen) = (start + limit_size); } void verify_mem_cleared (uint8_t* start, size_t size) { if (!Aligned (size)) { FATAL_GC_ERROR(); } PTR_PTR curr_ptr = (PTR_PTR) start; for (size_t i = 0; i < size / sizeof(PTR_PTR); i++) { if (*(curr_ptr++) != 0) { FATAL_GC_ERROR(); } } } #if defined (VERIFY_HEAP) && defined (BACKGROUND_GC) void gc_heap::set_batch_mark_array_bits (uint8_t* start, uint8_t* end) { size_t start_mark_bit = mark_bit_of (start); size_t end_mark_bit = mark_bit_of (end); unsigned int startbit = mark_bit_bit (start_mark_bit); unsigned int endbit = mark_bit_bit (end_mark_bit); size_t startwrd = mark_bit_word (start_mark_bit); size_t endwrd = mark_bit_word (end_mark_bit); dprintf (3, ("Setting all mark array bits between [%Ix:%Ix-[%Ix:%Ix", (size_t)start, (size_t)start_mark_bit, (size_t)end, (size_t)end_mark_bit)); unsigned int firstwrd = ~(lowbits (~0, startbit)); unsigned int lastwrd = ~(highbits (~0, endbit)); if (startwrd == endwrd) { unsigned int wrd = firstwrd & lastwrd; mark_array[startwrd] |= wrd; return; } // set the first mark word. if (startbit) { mark_array[startwrd] |= firstwrd; startwrd++; } for (size_t wrdtmp = startwrd; wrdtmp < endwrd; wrdtmp++) { mark_array[wrdtmp] = ~(unsigned int)0; } // set the last mark word. if (endbit) { mark_array[endwrd] |= lastwrd; } } // makes sure that the mark array bits between start and end are 0. void gc_heap::check_batch_mark_array_bits (uint8_t* start, uint8_t* end) { size_t start_mark_bit = mark_bit_of (start); size_t end_mark_bit = mark_bit_of (end); unsigned int startbit = mark_bit_bit (start_mark_bit); unsigned int endbit = mark_bit_bit (end_mark_bit); size_t startwrd = mark_bit_word (start_mark_bit); size_t endwrd = mark_bit_word (end_mark_bit); //dprintf (3, ("Setting all mark array bits between [%Ix:%Ix-[%Ix:%Ix", // (size_t)start, (size_t)start_mark_bit, // (size_t)end, (size_t)end_mark_bit)); unsigned int firstwrd = ~(lowbits (~0, startbit)); unsigned int lastwrd = ~(highbits (~0, endbit)); if (startwrd == endwrd) { unsigned int wrd = firstwrd & lastwrd; if (mark_array[startwrd] & wrd) { dprintf (1, ("The %Ix portion of mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared", wrd, startwrd, mark_array [startwrd], mark_word_address (startwrd))); FATAL_GC_ERROR(); } return; } // set the first mark word. 
if (startbit) { if (mark_array[startwrd] & firstwrd) { dprintf (1, ("The %Ix portion of mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared", firstwrd, startwrd, mark_array [startwrd], mark_word_address (startwrd))); FATAL_GC_ERROR(); } startwrd++; } for (size_t wrdtmp = startwrd; wrdtmp < endwrd; wrdtmp++) { if (mark_array[wrdtmp]) { dprintf (1, ("The mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared", wrdtmp, mark_array [wrdtmp], mark_word_address (wrdtmp))); FATAL_GC_ERROR(); } } // set the last mark word. if (endbit) { if (mark_array[endwrd] & lastwrd) { dprintf (1, ("The %Ix portion of mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared", lastwrd, lastwrd, mark_array [lastwrd], mark_word_address (lastwrd))); FATAL_GC_ERROR(); } } } #endif //VERIFY_HEAP && BACKGROUND_GC allocator::allocator (unsigned int num_b, int fbb, alloc_list* b, int gen) { assert (num_b < MAX_BUCKET_COUNT); num_buckets = num_b; first_bucket_bits = fbb; buckets = b; gen_number = gen; } alloc_list& allocator::alloc_list_of (unsigned int bn) { assert (bn < num_buckets); if (bn == 0) return first_bucket; else return buckets [bn-1]; } size_t& allocator::alloc_list_damage_count_of (unsigned int bn) { assert (bn < num_buckets); if (bn == 0) return first_bucket.alloc_list_damage_count(); else return buckets [bn-1].alloc_list_damage_count(); } void allocator::unlink_item (unsigned int bn, uint8_t* item, uint8_t* prev_item, BOOL use_undo_p) { alloc_list* al = &alloc_list_of (bn); uint8_t* next_item = free_list_slot(item); #ifdef DOUBLY_LINKED_FL // if repair_list is TRUE yet use_undo_p is FALSE, it means we do need to make sure // this item does not look like it's on the free list as we will not have a chance to // do that later. BOOL repair_list = !discard_if_no_fit_p (); #endif //DOUBLY_LINKED_FL if (prev_item) { if (use_undo_p && (free_list_undo (prev_item) == UNDO_EMPTY)) { assert (item == free_list_slot (prev_item)); free_list_undo (prev_item) = item; alloc_list_damage_count_of (bn)++; } free_list_slot (prev_item) = next_item; } else { al->alloc_list_head() = next_item; } if (al->alloc_list_tail() == item) { al->alloc_list_tail() = prev_item; } #ifdef DOUBLY_LINKED_FL if (repair_list) { if (!use_undo_p) { free_list_prev (item) = PREV_EMPTY; } } if (gen_number == max_generation) { dprintf (3, ("[g%2d, b%2d]UL: %Ix->%Ix->%Ix (h: %Ix, t: %Ix)", gen_number, bn, free_list_prev (item), item, free_list_slot (item), al->alloc_list_head(), al->alloc_list_tail())); dprintf (3, ("[g%2d, b%2d]UL: exit, h->N: %Ix, h->P: %Ix, t->N: %Ix, t->P: %Ix", gen_number, bn, (al->alloc_list_head() ? free_list_slot (al->alloc_list_head()) : 0), (al->alloc_list_head() ? free_list_prev (al->alloc_list_head()) : 0), (al->alloc_list_tail() ? free_list_slot (al->alloc_list_tail()) : 0), (al->alloc_list_tail() ? 
free_list_prev (al->alloc_list_tail()) : 0))); } #endif //DOUBLY_LINKED_FL if (al->alloc_list_head() == 0) { assert (al->alloc_list_tail() == 0); } } #ifdef DOUBLY_LINKED_FL void allocator::unlink_item_no_undo (unsigned int bn, uint8_t* item, size_t size) { alloc_list* al = &alloc_list_of (bn); uint8_t* next_item = free_list_slot (item); uint8_t* prev_item = free_list_prev (item); #ifdef FL_VERIFICATION { uint8_t* start = al->alloc_list_head(); BOOL found_p = FALSE; while (start) { if (start == item) { found_p = TRUE; break; } start = free_list_slot (start); } if (!found_p) { dprintf (1, ("could not find %Ix in b%d!!!", item, a_l_number)); FATAL_GC_ERROR(); } } #endif //FL_VERIFICATION if (prev_item) { free_list_slot (prev_item) = next_item; } else { al->alloc_list_head() = next_item; } if (next_item) { free_list_prev (next_item) = prev_item; } if (al->alloc_list_tail() == item) { al->alloc_list_tail() = prev_item; } free_list_prev (item) = PREV_EMPTY; if (gen_number == max_generation) { dprintf (3, ("[g%2d, b%2d]ULN: %Ix->%Ix->%Ix (h: %Ix, t: %Ix)", gen_number, bn, free_list_prev (item), item, free_list_slot (item), al->alloc_list_head(), al->alloc_list_tail())); dprintf (3, ("[g%2d, b%2d]ULN: exit: h->N: %Ix, h->P: %Ix, t->N: %Ix, t->P: %Ix", gen_number, bn, (al->alloc_list_head() ? free_list_slot (al->alloc_list_head()) : 0), (al->alloc_list_head() ? free_list_prev (al->alloc_list_head()) : 0), (al->alloc_list_tail() ? free_list_slot (al->alloc_list_tail()) : 0), (al->alloc_list_tail() ? free_list_prev (al->alloc_list_tail()) : 0))); } } void allocator::unlink_item_no_undo (uint8_t* item, size_t size) { unsigned int bn = first_suitable_bucket (size); unlink_item_no_undo (bn, item, size); } void allocator::unlink_item_no_undo_added (unsigned int bn, uint8_t* item, uint8_t* previous_item) { alloc_list* al = &alloc_list_of (bn); uint8_t* next_item = free_list_slot (item); uint8_t* prev_item = free_list_prev (item); assert (prev_item == previous_item); if (prev_item) { free_list_slot (prev_item) = next_item; } else { al->added_alloc_list_head() = next_item; } if (next_item) { free_list_prev (next_item) = prev_item; } if (al->added_alloc_list_tail() == item) { al->added_alloc_list_tail() = prev_item; } free_list_prev (item) = PREV_EMPTY; if (gen_number == max_generation) { dprintf (3333, ("[g%2d, b%2d]ULNA: %Ix->%Ix->%Ix (h: %Ix, t: %Ix)", gen_number, bn, free_list_prev (item), item, free_list_slot (item), al->added_alloc_list_head(), al->added_alloc_list_tail())); dprintf (3333, ("[g%2d, b%2d]ULNA: exit: h->N: %Ix, h->P: %Ix, t->N: %Ix, t->P: %Ix", gen_number, bn, (al->added_alloc_list_head() ? free_list_slot (al->added_alloc_list_head()) : 0), (al->added_alloc_list_head() ? free_list_prev (al->added_alloc_list_head()) : 0), (al->added_alloc_list_tail() ? free_list_slot (al->added_alloc_list_tail()) : 0), (al->added_alloc_list_tail() ? free_list_prev (al->added_alloc_list_tail()) : 0))); } } int allocator::thread_item_front_added (uint8_t* item, size_t size) { unsigned int a_l_number = first_suitable_bucket (size); alloc_list* al = &alloc_list_of (a_l_number); free_list_slot (item) = al->added_alloc_list_head(); free_list_prev (item) = 0; // this list's UNDO is not useful. 
free_list_undo (item) = UNDO_EMPTY; if (al->added_alloc_list_head() != 0) { free_list_prev (al->added_alloc_list_head()) = item; } al->added_alloc_list_head() = item; if (al->added_alloc_list_tail() == 0) { al->added_alloc_list_tail() = item; } if (gen_number == max_generation) { dprintf (3333, ("[g%2d, b%2d]TFFA: exit: %Ix->%Ix->%Ix (h: %Ix, t: %Ix)", gen_number, a_l_number, free_list_prev (item), item, free_list_slot (item), al->added_alloc_list_head(), al->added_alloc_list_tail())); dprintf (3333, ("[g%2d, b%2d]TFFA: h->N: %Ix, h->P: %Ix, t->N: %Ix, t->P: %Ix", gen_number, a_l_number, (al->added_alloc_list_head() ? free_list_slot (al->added_alloc_list_head()) : 0), (al->added_alloc_list_head() ? free_list_prev (al->added_alloc_list_head()) : 0), (al->added_alloc_list_tail() ? free_list_slot (al->added_alloc_list_tail()) : 0), (al->added_alloc_list_tail() ? free_list_prev (al->added_alloc_list_tail()) : 0))); } return a_l_number; } #endif //DOUBLY_LINKED_FL void allocator::clear() { for (unsigned int i = 0; i < num_buckets; i++) { alloc_list_head_of (i) = 0; alloc_list_tail_of (i) = 0; } } //always thread to the end. void allocator::thread_item (uint8_t* item, size_t size) { unsigned int a_l_number = first_suitable_bucket (size); alloc_list* al = &alloc_list_of (a_l_number); uint8_t*& head = al->alloc_list_head(); uint8_t*& tail = al->alloc_list_tail(); if (al->alloc_list_head() == 0) { assert (al->alloc_list_tail() == 0); } free_list_slot (item) = 0; free_list_undo (item) = UNDO_EMPTY; assert (item != head); #ifdef DOUBLY_LINKED_FL if (gen_number == max_generation) { free_list_prev (item) = tail; } #endif //DOUBLY_LINKED_FL if (head == 0) { head = item; } else { assert ((free_list_slot(head) != 0) || (tail == head)); assert (item != tail); assert (free_list_slot(tail) == 0); free_list_slot (tail) = item; } tail = item; #ifdef DOUBLY_LINKED_FL if (gen_number == max_generation) { dprintf (3333, ("[g%2d, b%2d]TFE: %Ix->%Ix->%Ix (h: %Ix, t: %Ix)", gen_number, a_l_number, free_list_prev (item), item, free_list_slot (item), al->alloc_list_head(), al->alloc_list_tail())); dprintf (3333, ("[g%2d, b%2d]TFE: exit: h->N: %Ix, h->P: %Ix, t->N: %Ix, t->P: %Ix", gen_number, a_l_number, (al->alloc_list_head() ? free_list_slot (al->alloc_list_head()) : 0), (al->alloc_list_head() ? free_list_prev (al->alloc_list_head()) : 0), (al->alloc_list_tail() ? free_list_slot (al->alloc_list_tail()) : 0), (al->alloc_list_tail() ? 
free_list_prev (al->alloc_list_tail()) : 0))); } #endif //DOUBLY_LINKED_FL } void allocator::thread_item_front (uint8_t* item, size_t size) { unsigned int a_l_number = first_suitable_bucket (size); alloc_list* al = &alloc_list_of (a_l_number); if (al->alloc_list_head() == 0) { assert (al->alloc_list_tail() == 0); } free_list_slot (item) = al->alloc_list_head(); free_list_undo (item) = UNDO_EMPTY; if (al->alloc_list_tail() == 0) { assert (al->alloc_list_head() == 0); al->alloc_list_tail() = al->alloc_list_head(); } #ifdef DOUBLY_LINKED_FL if (gen_number == max_generation) { if (al->alloc_list_head() != 0) { free_list_prev (al->alloc_list_head()) = item; } } #endif //DOUBLY_LINKED_FL al->alloc_list_head() = item; if (al->alloc_list_tail() == 0) { al->alloc_list_tail() = item; } #ifdef DOUBLY_LINKED_FL if (gen_number == max_generation) { free_list_prev (item) = 0; dprintf (3333, ("[g%2d, b%2d]TFF: exit: %Ix->%Ix->%Ix (h: %Ix, t: %Ix)", gen_number, a_l_number, free_list_prev (item), item, free_list_slot (item), al->alloc_list_head(), al->alloc_list_tail())); dprintf (3333, ("[g%2d, b%2d]TFF: h->N: %Ix, h->P: %Ix, t->N: %Ix, t->P: %Ix", gen_number, a_l_number, (al->alloc_list_head() ? free_list_slot (al->alloc_list_head()) : 0), (al->alloc_list_head() ? free_list_prev (al->alloc_list_head()) : 0), (al->alloc_list_tail() ? free_list_slot (al->alloc_list_tail()) : 0), (al->alloc_list_tail() ? free_list_prev (al->alloc_list_tail()) : 0))); } #endif //DOUBLY_LINKED_FL } void allocator::copy_to_alloc_list (alloc_list* toalist) { for (unsigned int i = 0; i < num_buckets; i++) { toalist [i] = alloc_list_of (i); #ifdef FL_VERIFICATION size_t damage_count = alloc_list_damage_count_of (i); // We are only calling this method to copy to an empty list // so damage count is always 0 assert (damage_count == 0); uint8_t* free_item = alloc_list_head_of (i); size_t count = 0; while (free_item) { count++; free_item = free_list_slot (free_item); } toalist[i].item_count = count; #endif //FL_VERIFICATION } } void allocator::copy_from_alloc_list (alloc_list* fromalist) { BOOL repair_list = !discard_if_no_fit_p (); #ifdef DOUBLY_LINKED_FL BOOL bgc_repair_p = FALSE; if (gen_number == max_generation) { bgc_repair_p = TRUE; if (alloc_list_damage_count_of (0) != 0) { GCToOSInterface::DebugBreak(); } uint8_t* b0_head = alloc_list_head_of (0); if (b0_head) { free_list_prev (b0_head) = 0; } added_alloc_list_head_of (0) = 0; added_alloc_list_tail_of (0) = 0; } unsigned int start_index = (bgc_repair_p ? 1 : 0); #else unsigned int start_index = 0; #endif //DOUBLY_LINKED_FL for (unsigned int i = start_index; i < num_buckets; i++) { size_t count = alloc_list_damage_count_of (i); alloc_list_of (i) = fromalist [i]; assert (alloc_list_damage_count_of (i) == 0); if (repair_list) { //repair the the list //new items may have been added during the plan phase //items may have been unlinked. 
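// Walk the list and restore every entry whose unlink was recorded in its free_list_undo slot;
// 'count' tracks how many damaged entries remain so the walk can stop early.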
uint8_t* free_item = alloc_list_head_of (i); while (free_item && count) { assert (((CObjectHeader*)free_item)->IsFree()); if ((free_list_undo (free_item) != UNDO_EMPTY)) { count--; free_list_slot (free_item) = free_list_undo (free_item); free_list_undo (free_item) = UNDO_EMPTY; } free_item = free_list_slot (free_item); } #ifdef DOUBLY_LINKED_FL if (bgc_repair_p) { added_alloc_list_head_of (i) = 0; added_alloc_list_tail_of (i) = 0; } #endif //DOUBLY_LINKED_FL #ifdef FL_VERIFICATION free_item = alloc_list_head_of (i); size_t item_count = 0; while (free_item) { item_count++; free_item = free_list_slot (free_item); } assert (item_count == alloc_list_of (i).item_count); #endif //FL_VERIFICATION } #ifdef DEBUG uint8_t* tail_item = alloc_list_tail_of (i); assert ((tail_item == 0) || (free_list_slot (tail_item) == 0)); #endif } } void allocator::commit_alloc_list_changes() { BOOL repair_list = !discard_if_no_fit_p (); #ifdef DOUBLY_LINKED_FL BOOL bgc_repair_p = FALSE; if (gen_number == max_generation) { bgc_repair_p = TRUE; } #endif //DOUBLY_LINKED_FL if (repair_list) { for (unsigned int i = 0; i < num_buckets; i++) { //remove the undo info from list. uint8_t* free_item = alloc_list_head_of (i); #ifdef DOUBLY_LINKED_FL if (bgc_repair_p) { dprintf (3, ("C[b%2d] ENTRY: h: %Ix t: %Ix", i, alloc_list_head_of (i), alloc_list_tail_of (i))); } if (free_item && bgc_repair_p) { if (free_list_prev (free_item) != 0) free_list_prev (free_item) = 0; } #endif //DOUBLY_LINKED_FL size_t count = alloc_list_damage_count_of (i); while (free_item && count) { assert (((CObjectHeader*)free_item)->IsFree()); if (free_list_undo (free_item) != UNDO_EMPTY) { free_list_undo (free_item) = UNDO_EMPTY; #ifdef DOUBLY_LINKED_FL if (bgc_repair_p) { uint8_t* next_item = free_list_slot (free_item); if (next_item && (free_list_prev (next_item) != free_item)) free_list_prev (next_item) = free_item; } #endif //DOUBLY_LINKED_FL count--; } free_item = free_list_slot (free_item); } alloc_list_damage_count_of (i) = 0; #ifdef DOUBLY_LINKED_FL if (bgc_repair_p) { uint8_t* head = alloc_list_head_of (i); uint8_t* tail_added = added_alloc_list_tail_of (i); if (tail_added) { assert (free_list_slot (tail_added) == 0); if (head) { free_list_slot (tail_added) = head; free_list_prev (head) = tail_added; } } uint8_t* head_added = added_alloc_list_head_of (i); if (head_added) { alloc_list_head_of (i) = head_added; uint8_t* final_head = alloc_list_head_of (i); if (alloc_list_tail_of (i) == 0) { alloc_list_tail_of (i) = tail_added; } } added_alloc_list_head_of (i) = 0; added_alloc_list_tail_of (i) = 0; } #endif //DOUBLY_LINKED_FL } } } #ifdef USE_REGIONS void allocator::thread_sip_fl (heap_segment* region) { uint8_t* region_fl_head = region->free_list_head; uint8_t* region_fl_tail = region->free_list_tail; if (!region_fl_head) { assert (!region_fl_tail); assert (region->free_list_size == 0); return; } if (num_buckets == 1) { dprintf (REGIONS_LOG, ("threading gen%d region %Ix onto gen%d FL", heap_segment_gen_num (region), heap_segment_mem (region), gen_number)); alloc_list* al = &alloc_list_of (0); uint8_t*& head = al->alloc_list_head(); uint8_t*& tail = al->alloc_list_tail(); if (tail == 0) { assert (head == 0); head = region_fl_head; } else { free_list_slot (tail) = region_fl_head; } tail = region_fl_tail; } else { dprintf (REGIONS_LOG, ("threading gen%d region %Ix onto gen%d bucketed FL", heap_segment_gen_num (region), heap_segment_mem (region), gen_number)); // If we have a bucketed free list we'd need to go through the region's free list. 
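// Thread each item into the bucket that matches its size; the accumulated size is checked
// against the region's recorded free_list_size at the end.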
uint8_t* region_fl_item = region_fl_head; size_t total_free_size = 0; while (region_fl_item) { uint8_t* next_fl_item = free_list_slot (region_fl_item); size_t size_item = size (region_fl_item); thread_item (region_fl_item, size_item); total_free_size += size_item; region_fl_item = next_fl_item; } assert (total_free_size == region->free_list_size); } } #endif //USE_REGIONS #ifdef FEATURE_EVENT_TRACE uint16_t allocator::count_largest_items (etw_bucket_info* bucket_info, size_t max_size, size_t max_item_count, size_t* recorded_fl_info_size) { assert (gen_number == max_generation); size_t size_counted_total = 0; size_t items_counted_total = 0; uint16_t bucket_info_index = 0; for (int i = (num_buckets - 1); i >= 0; i--) { uint32_t items_counted = 0; size_t size_counted = 0; uint8_t* free_item = alloc_list_head_of ((unsigned int)i); while (free_item) { assert (((CObjectHeader*)free_item)->IsFree()); size_t free_item_size = Align (size (free_item)); size_counted_total += free_item_size; size_counted += free_item_size; items_counted_total++; items_counted++; if ((size_counted_total > max_size) || (items_counted > max_item_count)) { bucket_info[bucket_info_index++].set ((uint16_t)i, items_counted, size_counted); *recorded_fl_info_size = size_counted_total; return bucket_info_index; } free_item = free_list_slot (free_item); } if (items_counted) { bucket_info[bucket_info_index++].set ((uint16_t)i, items_counted, size_counted); } } *recorded_fl_info_size = size_counted_total; return bucket_info_index; } #endif //FEATURE_EVENT_TRACE void gc_heap::adjust_limit_clr (uint8_t* start, size_t limit_size, size_t size, alloc_context* acontext, uint32_t flags, heap_segment* seg, int align_const, int gen_number) { bool uoh_p = (gen_number > 0); GCSpinLock* msl = uoh_p ? &more_space_lock_uoh : &more_space_lock_soh; uint64_t& total_alloc_bytes = uoh_p ? 
total_alloc_bytes_uoh : total_alloc_bytes_soh; size_t aligned_min_obj_size = Align(min_obj_size, align_const); if (seg) { assert (heap_segment_used (seg) <= heap_segment_committed (seg)); } #ifdef MULTIPLE_HEAPS if (gen_number == 0) { if (!gen0_allocated_after_gc_p) { gen0_allocated_after_gc_p = true; } } #endif //MULTIPLE_HEAPS dprintf (3, ("Expanding segment allocation [%Ix, %Ix[", (size_t)start, (size_t)start + limit_size - aligned_min_obj_size)); if ((acontext->alloc_limit != start) && (acontext->alloc_limit + aligned_min_obj_size)!= start) { uint8_t* hole = acontext->alloc_ptr; if (hole != 0) { size_t ac_size = (acontext->alloc_limit - acontext->alloc_ptr); dprintf (3, ("filling up hole [%Ix, %Ix[", (size_t)hole, (size_t)hole + ac_size + aligned_min_obj_size)); // when we are finishing an allocation from a free list // we know that the free area was Align(min_obj_size) larger acontext->alloc_bytes -= ac_size; total_alloc_bytes -= ac_size; size_t free_obj_size = ac_size + aligned_min_obj_size; make_unused_array (hole, free_obj_size); generation_free_obj_space (generation_of (gen_number)) += free_obj_size; } acontext->alloc_ptr = start; } else { if (gen_number == 0) { #ifdef USE_REGIONS if (acontext->alloc_ptr == 0) { acontext->alloc_ptr = start; } else #endif //USE_REGIONS { size_t pad_size = aligned_min_obj_size; dprintf (3, ("contigous ac: making min obj gap %Ix->%Ix(%Id)", acontext->alloc_ptr, (acontext->alloc_ptr + pad_size), pad_size)); make_unused_array (acontext->alloc_ptr, pad_size); acontext->alloc_ptr += pad_size; } } } acontext->alloc_limit = (start + limit_size - aligned_min_obj_size); size_t added_bytes = limit_size - ((gen_number <= max_generation) ? aligned_min_obj_size : 0); acontext->alloc_bytes += added_bytes; total_alloc_bytes += added_bytes; size_t etw_allocation_amount = 0; bool fire_event_p = update_alloc_info (gen_number, added_bytes, &etw_allocation_amount); uint8_t* saved_used = 0; if (seg) { saved_used = heap_segment_used (seg); } if (seg == ephemeral_heap_segment) { //Sometimes the allocated size is advanced without clearing the //memory. Let's catch up here if (heap_segment_used (seg) < (alloc_allocated - plug_skew)) { heap_segment_used (seg) = alloc_allocated - plug_skew; assert (heap_segment_mem (seg) <= heap_segment_used (seg)); assert (heap_segment_used (seg) <= heap_segment_reserved (seg)); } } #ifdef BACKGROUND_GC else if (seg) { uint8_t* old_allocated = heap_segment_allocated (seg) - plug_skew - limit_size; #ifdef FEATURE_LOH_COMPACTION if (gen_number == loh_generation) { old_allocated -= Align (loh_padding_obj_size, align_const); } #endif //FEATURE_LOH_COMPACTION assert (heap_segment_used (seg) >= old_allocated); } #endif //BACKGROUND_GC // we are going to clear a right-edge exclusive span [clear_start, clear_limit) // but will adjust for cases when object is ok to stay dirty or the space has not seen any use yet // NB: the size and limit_size include syncblock, which is to the -1 of the object start // that effectively shifts the allocation by `plug_skew` uint8_t* clear_start = start - plug_skew; uint8_t* clear_limit = start + limit_size - plug_skew; if (flags & GC_ALLOC_ZEROING_OPTIONAL) { uint8_t* obj_start = acontext->alloc_ptr; assert(start >= obj_start); uint8_t* obj_end = obj_start + size - plug_skew; assert(obj_end >= clear_start); // if clearing at the object start, clear the syncblock. 
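// (the sync block lives in the pointer-sized slot at start - plug_skew, i.e. clear_start
// points right at it when obj_start == start)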
if(obj_start == start) { *(PTR_PTR)clear_start = 0; } // skip the rest of the object dprintf(3, ("zeroing optional: skipping object at %Ix->%Ix(%Id)", clear_start, obj_end, obj_end - clear_start)); clear_start = obj_end; } // fetch the ephemeral_heap_segment *before* we release the msl // - ephemeral_heap_segment may change due to other threads allocating heap_segment* gen0_segment = ephemeral_heap_segment; // check if space to clear is all dirty from prior use or only partially if ((seg == 0) || (clear_limit <= heap_segment_used (seg))) { add_saved_spinlock_info (uoh_p, me_release, mt_clr_mem); leave_spin_lock (msl); if (clear_start < clear_limit) { dprintf(3, ("clearing memory at %Ix for %d bytes", clear_start, clear_limit - clear_start)); memclr(clear_start, clear_limit - clear_start); } } else { // we only need to clear [clear_start, used) and only if clear_start < used uint8_t* used = heap_segment_used (seg); heap_segment_used (seg) = clear_limit; add_saved_spinlock_info (uoh_p, me_release, mt_clr_mem); leave_spin_lock (msl); if (clear_start < used) { if (used != saved_used) { FATAL_GC_ERROR(); } dprintf (2, ("clearing memory before used at %Ix for %Id bytes", clear_start, used - clear_start)); memclr (clear_start, used - clear_start); } } #ifdef FEATURE_EVENT_TRACE if (fire_event_p) { fire_etw_allocation_event (etw_allocation_amount, gen_number, acontext->alloc_ptr, size); } #endif //FEATURE_EVENT_TRACE //this portion can be done after we release the lock if (seg == gen0_segment || ((seg == nullptr) && (gen_number == 0) && (limit_size >= CLR_SIZE / 2))) { if (gen0_must_clear_bricks > 0) { //set the brick table to speed up find_object size_t b = brick_of (acontext->alloc_ptr); set_brick (b, acontext->alloc_ptr - brick_address (b)); b++; dprintf (3, ("Allocation Clearing bricks [%Ix, %Ix[", b, brick_of (align_on_brick (start + limit_size)))); volatile short* x = &brick_table [b]; short* end_x = &brick_table [brick_of (align_on_brick (start + limit_size))]; for (;x < end_x;x++) *x = -1; } else { gen0_bricks_cleared = FALSE; } } // verifying the memory is completely cleared. //if (!(flags & GC_ALLOC_ZEROING_OPTIONAL)) //{ // verify_mem_cleared(start - plug_skew, limit_size); //} } size_t gc_heap::new_allocation_limit (size_t size, size_t physical_limit, int gen_number) { dynamic_data* dd = dynamic_data_of (gen_number); ptrdiff_t new_alloc = dd_new_allocation (dd); assert (new_alloc == (ptrdiff_t)Align (new_alloc, get_alignment_constant (gen_number < uoh_start_generation))); ptrdiff_t logical_limit = max (new_alloc, (ptrdiff_t)size); size_t limit = min (logical_limit, (ptrdiff_t)physical_limit); assert (limit == Align (limit, get_alignment_constant (gen_number <= max_generation))); return limit; } size_t gc_heap::limit_from_size (size_t size, uint32_t flags, size_t physical_limit, int gen_number, int align_const) { size_t padded_size = size + Align (min_obj_size, align_const); // for LOH this is not true...we could select a physical_limit that's exactly the same // as size. assert ((gen_number != 0) || (physical_limit >= padded_size)); // For SOH if the size asked for is very small, we want to allocate more than just what's asked for if possible. // Unless we were told not to clean, then we will not force it. size_t min_size_to_allocate = ((gen_number == 0 && !(flags & GC_ALLOC_ZEROING_OPTIONAL)) ? 
allocation_quantum : 0); size_t desired_size_to_allocate = max (padded_size, min_size_to_allocate); size_t new_physical_limit = min (physical_limit, desired_size_to_allocate); size_t new_limit = new_allocation_limit (padded_size, new_physical_limit, gen_number); assert (new_limit >= (size + Align (min_obj_size, align_const))); dprintf (3, ("h%d requested to allocate %Id bytes, actual size is %Id, phy limit: %Id", heap_number, size, new_limit, physical_limit)); return new_limit; } void gc_heap::add_to_oom_history_per_heap() { oom_history* current_hist = &oomhist_per_heap[oomhist_index_per_heap]; memcpy (current_hist, &oom_info, sizeof (oom_info)); oomhist_index_per_heap++; if (oomhist_index_per_heap == max_oom_history_count) { oomhist_index_per_heap = 0; } } void gc_heap::handle_oom (oom_reason reason, size_t alloc_size, uint8_t* allocated, uint8_t* reserved) { if (reason == oom_budget) { alloc_size = dd_min_size (dynamic_data_of (0)) / 2; } if ((reason == oom_budget) && ((!fgm_result.loh_p) && (fgm_result.fgm != fgm_no_failure))) { // This means during the last GC we needed to reserve and/or commit more memory // but we couldn't. We proceeded with the GC and ended up not having enough // memory at the end. This is a legitimate OOM situtation. Otherwise we // probably made a mistake and didn't expand the heap when we should have. reason = oom_low_mem; } oom_info.reason = reason; oom_info.allocated = allocated; oom_info.reserved = reserved; oom_info.alloc_size = alloc_size; oom_info.gc_index = settings.gc_index; oom_info.fgm = fgm_result.fgm; oom_info.size = fgm_result.size; oom_info.available_pagefile_mb = fgm_result.available_pagefile_mb; oom_info.loh_p = fgm_result.loh_p; add_to_oom_history_per_heap(); fgm_result.fgm = fgm_no_failure; // Break early - before the more_space_lock is release so no other threads // could have allocated on the same heap when OOM happened. if (GCConfig::GetBreakOnOOM()) { GCToOSInterface::DebugBreak(); } } #ifdef BACKGROUND_GC BOOL gc_heap::background_allowed_p() { return ( gc_can_use_concurrent && ((settings.pause_mode == pause_interactive) || (settings.pause_mode == pause_sustained_low_latency)) ); } #endif //BACKGROUND_GC void gc_heap::check_for_full_gc (int gen_num, size_t size) { BOOL should_notify = FALSE; // if we detect full gc because of the allocation budget specified this is TRUE; // it's FALSE if it's due to other factors. BOOL alloc_factor = TRUE; int n_initial = gen_num; BOOL local_blocking_collection = FALSE; BOOL local_elevation_requested = FALSE; int new_alloc_remain_percent = 0; if (full_gc_approach_event_set) { return; } if (gen_num < max_generation) { gen_num = max_generation; } dynamic_data* dd_full = dynamic_data_of (gen_num); ptrdiff_t new_alloc_remain = 0; uint32_t pct = (gen_num >= uoh_start_generation) ? fgn_loh_percent : fgn_maxgen_percent; for (int gen_index = 0; gen_index < total_generation_count; gen_index++) { dprintf (2, ("FGN: h#%d: gen%d: %Id(%Id)", heap_number, gen_index, dd_new_allocation (dynamic_data_of (gen_index)), dd_desired_allocation (dynamic_data_of (gen_index)))); } // For small object allocations we only check every fgn_check_quantum bytes. 
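// fgn_last_alloc remembers how much gen0 budget was left at the previous check; if less
// than fgn_check_quantum has been consumed since then (and the budget has not gone
// negative), skip the notification work to keep the allocation path cheap.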
if (n_initial == 0) { dprintf (2, ("FGN: gen0 last recorded alloc: %Id", fgn_last_alloc)); dynamic_data* dd_0 = dynamic_data_of (n_initial); if (((fgn_last_alloc - dd_new_allocation (dd_0)) < fgn_check_quantum) && (dd_new_allocation (dd_0) >= 0)) { return; } else { fgn_last_alloc = dd_new_allocation (dd_0); dprintf (2, ("FGN: gen0 last recorded alloc is now: %Id", fgn_last_alloc)); } // We don't consider the size that came from soh 'cause it doesn't contribute to the // gen2 budget. size = 0; } int n = 0; for (int i = 1; i <= max_generation; i++) { if (get_new_allocation (i) <= 0) { n = i; } else break; } dprintf (2, ("FGN: h#%d: gen%d budget exceeded", heap_number, n)); if (gen_num == max_generation) { // If it's small object heap we should first see if we will even be looking at gen2 budget // in the next GC or not. If not we should go directly to checking other factors. if (n < (max_generation - 1)) { goto check_other_factors; } } new_alloc_remain = dd_new_allocation (dd_full) - size; new_alloc_remain_percent = (int)(((float)(new_alloc_remain) / (float)dd_desired_allocation (dd_full)) * 100); dprintf (2, ("FGN: alloc threshold for gen%d is %d%%, current threshold is %d%%", gen_num, pct, new_alloc_remain_percent)); if (new_alloc_remain_percent <= (int)pct) { #ifdef BACKGROUND_GC // If background GC is enabled, we still want to check whether this will // be a blocking GC or not because we only want to notify when it's a // blocking full GC. if (background_allowed_p()) { goto check_other_factors; } #endif //BACKGROUND_GC should_notify = TRUE; goto done; } check_other_factors: dprintf (2, ("FGC: checking other factors")); n = generation_to_condemn (n, &local_blocking_collection, &local_elevation_requested, TRUE); if (local_elevation_requested && (n == max_generation)) { if (settings.should_lock_elevation) { int local_elevation_locked_count = settings.elevation_locked_count + 1; if (local_elevation_locked_count != 6) { dprintf (2, ("FGN: lock count is %d - Condemning max_generation-1", local_elevation_locked_count)); n = max_generation - 1; } } } dprintf (2, ("FGN: we estimate gen%d will be collected", n)); #ifdef BACKGROUND_GC // When background GC is enabled it decreases the accuracy of our predictability - // by the time the GC happens, we may not be under BGC anymore. If we try to // predict often enough it should be ok. if ((n == max_generation) && (gc_heap::background_running_p())) { n = max_generation - 1; dprintf (2, ("FGN: bgc - 1 instead of 2")); } if ((n == max_generation) && !local_blocking_collection) { if (!background_allowed_p()) { local_blocking_collection = TRUE; } } #endif //BACKGROUND_GC dprintf (2, ("FGN: we estimate gen%d will be collected: %s", n, (local_blocking_collection ? "blocking" : "background"))); if ((n == max_generation) && local_blocking_collection) { alloc_factor = FALSE; should_notify = TRUE; goto done; } done: if (should_notify) { dprintf (2, ("FGN: gen%d detecting full GC approaching(%s) (GC#%d) (%Id%% left in gen%d)", n_initial, (alloc_factor ? 
"alloc" : "other"), dd_collection_count (dynamic_data_of (0)), new_alloc_remain_percent, gen_num)); send_full_gc_notification (n_initial, alloc_factor); } } void gc_heap::send_full_gc_notification (int gen_num, BOOL due_to_alloc_p) { if (!full_gc_approach_event_set) { assert (full_gc_approach_event.IsValid()); FIRE_EVENT(GCFullNotify_V1, gen_num, due_to_alloc_p); full_gc_end_event.Reset(); full_gc_approach_event.Set(); full_gc_approach_event_set = true; } } wait_full_gc_status gc_heap::full_gc_wait (GCEvent *event, int time_out_ms) { #ifdef MULTIPLE_HEAPS gc_heap* hp = gc_heap::g_heaps[0]; #else gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS if (hp->fgn_maxgen_percent == 0) { return wait_full_gc_na; } uint32_t wait_result = user_thread_wait(event, FALSE, time_out_ms); if ((wait_result == WAIT_OBJECT_0) || (wait_result == WAIT_TIMEOUT)) { if (hp->fgn_maxgen_percent == 0) { return wait_full_gc_cancelled; } if (wait_result == WAIT_OBJECT_0) { #ifdef BACKGROUND_GC if (fgn_last_gc_was_concurrent) { fgn_last_gc_was_concurrent = FALSE; return wait_full_gc_na; } else #endif //BACKGROUND_GC { return wait_full_gc_success; } } else { return wait_full_gc_timeout; } } else { return wait_full_gc_failed; } } size_t gc_heap::get_full_compact_gc_count() { return full_gc_counts[gc_type_compacting]; } // DTREVIEW - we should check this in dt_low_ephemeral_space_p // as well. inline BOOL gc_heap::short_on_end_of_seg (heap_segment* seg) { uint8_t* allocated = heap_segment_allocated (seg); #ifdef USE_REGIONS BOOL sufficient_p = sufficient_space_regions (end_gen0_region_space, end_space_after_gc()); #else BOOL sufficient_p = sufficient_space_end_seg (allocated, heap_segment_committed (seg), heap_segment_reserved (seg), end_space_after_gc()); #endif //USE_REGIONS if (!sufficient_p) { if (sufficient_gen0_space_p) { dprintf (GTC_LOG, ("gen0 has enough free space")); } sufficient_p = sufficient_gen0_space_p; } return !sufficient_p; } inline BOOL gc_heap::a_fit_free_list_p (int gen_number, size_t size, alloc_context* acontext, uint32_t flags, int align_const) { BOOL can_fit = FALSE; generation* gen = generation_of (gen_number); allocator* gen_allocator = generation_allocator (gen); for (unsigned int a_l_idx = gen_allocator->first_suitable_bucket(size); a_l_idx < gen_allocator->number_of_buckets(); a_l_idx++) { uint8_t* free_list = gen_allocator->alloc_list_head_of (a_l_idx); uint8_t* prev_free_item = 0; while (free_list != 0) { dprintf (3, ("considering free list %Ix", (size_t)free_list)); size_t free_list_size = unused_array_size (free_list); if ((size + Align (min_obj_size, align_const)) <= free_list_size) { dprintf (3, ("Found adequate unused area: [%Ix, size: %Id", (size_t)free_list, free_list_size)); gen_allocator->unlink_item (a_l_idx, free_list, prev_free_item, FALSE); // We ask for more Align (min_obj_size) // to make sure that we can insert a free object // in adjust_limit will set the limit lower size_t limit = limit_from_size (size, flags, free_list_size, gen_number, align_const); dd_new_allocation (dynamic_data_of (gen_number)) -= limit; uint8_t* remain = (free_list + limit); size_t remain_size = (free_list_size - limit); if (remain_size >= Align(min_free_list, align_const)) { make_unused_array (remain, remain_size); gen_allocator->thread_item_front (remain, remain_size); assert (remain_size >= Align (min_obj_size, align_const)); } else { //absorb the entire free list limit += remain_size; } generation_free_list_space (gen) -= limit; assert ((ptrdiff_t)generation_free_list_space (gen) >= 0); 
adjust_limit_clr (free_list, limit, size, acontext, flags, 0, align_const, gen_number); can_fit = TRUE; goto end; } else if (gen_allocator->discard_if_no_fit_p()) { assert (prev_free_item == 0); dprintf (3, ("couldn't use this free area, discarding")); generation_free_obj_space (gen) += free_list_size; gen_allocator->unlink_item (a_l_idx, free_list, prev_free_item, FALSE); generation_free_list_space (gen) -= free_list_size; assert ((ptrdiff_t)generation_free_list_space (gen) >= 0); } else { prev_free_item = free_list; } free_list = free_list_slot (free_list); } } end: return can_fit; } #ifdef BACKGROUND_GC void gc_heap::bgc_uoh_alloc_clr (uint8_t* alloc_start, size_t size, alloc_context* acontext, uint32_t flags, int gen_number, int align_const, int lock_index, BOOL check_used_p, heap_segment* seg) { make_unused_array (alloc_start, size); #ifdef DOUBLY_LINKED_FL clear_prev_bit (alloc_start, size); #endif //DOUBLY_LINKED_FL size_t size_of_array_base = sizeof(ArrayBase); bgc_alloc_lock->uoh_alloc_done_with_index (lock_index); // clear memory while not holding the lock. size_t size_to_skip = size_of_array_base; size_t size_to_clear = size - size_to_skip - plug_skew; size_t saved_size_to_clear = size_to_clear; if (check_used_p) { uint8_t* end = alloc_start + size - plug_skew; uint8_t* used = heap_segment_used (seg); if (used < end) { if ((alloc_start + size_to_skip) < used) { size_to_clear = used - (alloc_start + size_to_skip); } else { size_to_clear = 0; } dprintf (2, ("bgc uoh: setting used to %Ix", end)); heap_segment_used (seg) = end; } dprintf (2, ("bgc uoh: used: %Ix, alloc: %Ix, end of alloc: %Ix, clear %Id bytes", used, alloc_start, end, size_to_clear)); } else { dprintf (2, ("bgc uoh: [%Ix-[%Ix(%Id)", alloc_start, alloc_start+size, size)); } #ifdef VERIFY_HEAP // since we filled in 0xcc for free object when we verify heap, // we need to make sure we clear those bytes. if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) { if (size_to_clear < saved_size_to_clear) { size_to_clear = saved_size_to_clear; } } #endif //VERIFY_HEAP size_t allocated_size = size - Align (min_obj_size, align_const); total_alloc_bytes_uoh += allocated_size; size_t etw_allocation_amount = 0; bool fire_event_p = update_alloc_info (gen_number, allocated_size, &etw_allocation_amount); dprintf (SPINLOCK_LOG, ("[%d]Lmsl to clear uoh obj", heap_number)); add_saved_spinlock_info (true, me_release, mt_clr_large_mem); leave_spin_lock (&more_space_lock_uoh); #ifdef FEATURE_EVENT_TRACE if (fire_event_p) { fire_etw_allocation_event (etw_allocation_amount, gen_number, alloc_start, size); } #endif //FEATURE_EVENT_TRACE ((void**) alloc_start)[-1] = 0; //clear the sync block if (!(flags & GC_ALLOC_ZEROING_OPTIONAL)) { memclr(alloc_start + size_to_skip, size_to_clear); } bgc_alloc_lock->uoh_alloc_set (alloc_start); acontext->alloc_ptr = alloc_start; acontext->alloc_limit = (alloc_start + size - Align (min_obj_size, align_const)); // need to clear the rest of the object before we hand it out. clear_unused_array(alloc_start, size); } #endif //BACKGROUND_GC BOOL gc_heap::a_fit_free_list_uoh_p (size_t size, alloc_context* acontext, uint32_t flags, int align_const, int gen_number) { BOOL can_fit = FALSE; generation* gen = generation_of (gen_number); allocator* allocator = generation_allocator (gen); #ifdef FEATURE_LOH_COMPACTION size_t loh_pad = gen_number == loh_generation ? 
Align (loh_padding_obj_size, align_const) : 0; #endif //FEATURE_LOH_COMPACTION #ifdef BACKGROUND_GC int cookie = -1; #endif //BACKGROUND_GC for (unsigned int a_l_idx = allocator->first_suitable_bucket(size); a_l_idx < allocator->number_of_buckets(); a_l_idx++) { uint8_t* free_list = allocator->alloc_list_head_of (a_l_idx); uint8_t* prev_free_item = 0; while (free_list != 0) { dprintf (3, ("considering free list %Ix", (size_t)free_list)); size_t free_list_size = unused_array_size(free_list); ptrdiff_t diff = free_list_size - size; #ifdef FEATURE_LOH_COMPACTION diff -= loh_pad; #endif //FEATURE_LOH_COMPACTION // must fit exactly or leave formattable space if ((diff == 0) || (diff >= (ptrdiff_t)Align (min_obj_size, align_const))) { #ifdef BACKGROUND_GC cookie = bgc_alloc_lock->uoh_alloc_set (free_list); bgc_track_uoh_alloc(); #endif //BACKGROUND_GC allocator->unlink_item (a_l_idx, free_list, prev_free_item, FALSE); remove_gen_free (gen_number, free_list_size); // Substract min obj size because limit_from_size adds it. Not needed for LOH size_t limit = limit_from_size (size - Align(min_obj_size, align_const), flags, free_list_size, gen_number, align_const); dd_new_allocation (dynamic_data_of (gen_number)) -= limit; #ifdef FEATURE_LOH_COMPACTION if (loh_pad) { make_unused_array (free_list, loh_pad); generation_free_obj_space (gen) += loh_pad; limit -= loh_pad; free_list += loh_pad; free_list_size -= loh_pad; } #endif //FEATURE_LOH_COMPACTION uint8_t* remain = (free_list + limit); size_t remain_size = (free_list_size - limit); if (remain_size != 0) { assert (remain_size >= Align (min_obj_size, align_const)); make_unused_array (remain, remain_size); } if (remain_size >= Align(min_free_list, align_const)) { uoh_thread_gap_front (remain, remain_size, gen); add_gen_free (gen_number, remain_size); assert (remain_size >= Align (min_obj_size, align_const)); } else { generation_free_obj_space (gen) += remain_size; } generation_free_list_space (gen) -= free_list_size; assert ((ptrdiff_t)generation_free_list_space (gen) >= 0); generation_free_list_allocated (gen) += limit; dprintf (3, ("found fit on loh at %Ix", free_list)); #ifdef BACKGROUND_GC if (cookie != -1) { bgc_uoh_alloc_clr (free_list, limit, acontext, flags, gen_number, align_const, cookie, FALSE, 0); } else #endif //BACKGROUND_GC { adjust_limit_clr (free_list, limit, size, acontext, flags, 0, align_const, gen_number); } //fix the limit to compensate for adjust_limit_clr making it too short acontext->alloc_limit += Align (min_obj_size, align_const); can_fit = TRUE; goto exit; } prev_free_item = free_list; free_list = free_list_slot (free_list); } } exit: return can_fit; } BOOL gc_heap::a_fit_segment_end_p (int gen_number, heap_segment* seg, size_t size, alloc_context* acontext, uint32_t flags, int align_const, BOOL* commit_failed_p) { *commit_failed_p = FALSE; size_t limit = 0; bool hard_limit_short_seg_end_p = false; #ifdef BACKGROUND_GC int cookie = -1; #endif //BACKGROUND_GC uint8_t*& allocated = ((gen_number == 0) ? 
alloc_allocated : heap_segment_allocated(seg)); size_t pad = Align (min_obj_size, align_const); #ifdef FEATURE_LOH_COMPACTION size_t loh_pad = Align (loh_padding_obj_size, align_const); if (gen_number == loh_generation) { pad += loh_pad; } #endif //FEATURE_LOH_COMPACTION uint8_t* end = heap_segment_committed (seg) - pad; if (a_size_fit_p (size, allocated, end, align_const)) { limit = limit_from_size (size, flags, (end - allocated), gen_number, align_const); goto found_fit; } end = heap_segment_reserved (seg) - pad; if ((heap_segment_reserved (seg) != heap_segment_committed (seg)) && (a_size_fit_p (size, allocated, end, align_const))) { limit = limit_from_size (size, flags, (end - allocated), gen_number, align_const); if (grow_heap_segment (seg, (allocated + limit), &hard_limit_short_seg_end_p)) { goto found_fit; } else { if (!hard_limit_short_seg_end_p) { dprintf (2, ("can't grow segment, doing a full gc")); *commit_failed_p = TRUE; } else { assert (heap_hard_limit); } } } goto found_no_fit; found_fit: dd_new_allocation (dynamic_data_of (gen_number)) -= limit; #ifdef BACKGROUND_GC if (gen_number != 0) { cookie = bgc_alloc_lock->uoh_alloc_set (allocated); bgc_track_uoh_alloc(); } #endif //BACKGROUND_GC #ifdef FEATURE_LOH_COMPACTION if (gen_number == loh_generation) { make_unused_array (allocated, loh_pad); generation_free_obj_space (generation_of (gen_number)) += loh_pad; allocated += loh_pad; limit -= loh_pad; } #endif //FEATURE_LOH_COMPACTION #if defined (VERIFY_HEAP) && defined (_DEBUG) // we are responsible for cleaning the syncblock and we will do it later // as a part of cleanup routine and when not holding the heap lock. // However, once we move "allocated" forward and if another thread initiate verification of // the previous object, it may consider the syncblock in the "next" eligible for validation. // (see also: object.cpp/Object::ValidateInner) // Make sure it will see cleaned up state to prevent triggering occasional verification failures. // And make sure the write happens before updating "allocated" VolatileStore(((void**)allocated - 1), (void*)0); //clear the sync block #endif //VERIFY_HEAP && _DEBUG uint8_t* old_alloc; old_alloc = allocated; dprintf (3, ("found fit at end of seg: %Ix", old_alloc)); #ifdef BACKGROUND_GC if (cookie != -1) { allocated += limit; bgc_uoh_alloc_clr (old_alloc, limit, acontext, flags, gen_number, align_const, cookie, TRUE, seg); } else #endif //BACKGROUND_GC { // In a contiguous AC case with GC_ALLOC_ZEROING_OPTIONAL, deduct unspent space from the limit to // clear only what is necessary. if ((flags & GC_ALLOC_ZEROING_OPTIONAL) && ((allocated == acontext->alloc_limit) || (allocated == (acontext->alloc_limit + Align (min_obj_size, align_const))))) { assert(gen_number == 0); assert(allocated > acontext->alloc_ptr); size_t extra = allocated - acontext->alloc_ptr; limit -= extra; // Since we are not consuming all the memory we already deducted from the budget, // we should put the extra back. 
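// 'extra' is the unspent tail of the current allocation context, which was already charged
// to the gen0 budget when it was handed out; crediting it back keeps dd_new_allocation in
// line with what this fit actually consumes.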
dynamic_data* dd = dynamic_data_of (0); dd_new_allocation (dd) += extra; // add space for an AC continuity divider limit += Align(min_obj_size, align_const); } allocated += limit; adjust_limit_clr (old_alloc, limit, size, acontext, flags, seg, align_const, gen_number); } return TRUE; found_no_fit: return FALSE; } BOOL gc_heap::uoh_a_fit_segment_end_p (int gen_number, size_t size, alloc_context* acontext, uint32_t flags, int align_const, BOOL* commit_failed_p, oom_reason* oom_r) { *commit_failed_p = FALSE; generation* gen = generation_of (gen_number); heap_segment* seg = generation_allocation_segment (gen); BOOL can_allocate_p = FALSE; while (seg) { #ifdef BACKGROUND_GC if (seg->flags & heap_segment_flags_uoh_delete) { dprintf (3, ("h%d skipping seg %Ix to be deleted", heap_number, (size_t)seg)); } else #endif //BACKGROUND_GC { if (a_fit_segment_end_p (gen_number, seg, (size - Align (min_obj_size, align_const)), acontext, flags, align_const, commit_failed_p)) { acontext->alloc_limit += Align (min_obj_size, align_const); can_allocate_p = TRUE; break; } if (*commit_failed_p) { *oom_r = oom_cant_commit; break; } } seg = heap_segment_next_rw (seg); } if (can_allocate_p) { generation_end_seg_allocated (gen) += size; } return can_allocate_p; } #ifdef BACKGROUND_GC inline void gc_heap::wait_for_background (alloc_wait_reason awr, bool loh_p) { GCSpinLock* msl = loh_p ? &more_space_lock_uoh : &more_space_lock_soh; dprintf (2, ("BGC is already in progress, waiting for it to finish")); add_saved_spinlock_info (loh_p, me_release, mt_wait_bgc); leave_spin_lock (msl); background_gc_wait (awr); enter_spin_lock (msl); add_saved_spinlock_info (loh_p, me_acquire, mt_wait_bgc); } bool gc_heap::wait_for_bgc_high_memory (alloc_wait_reason awr, bool loh_p) { bool wait_p = false; if (gc_heap::background_running_p()) { uint32_t memory_load; get_memory_info (&memory_load); if (memory_load >= m_high_memory_load_th) { wait_p = true; dprintf (GTC_LOG, ("high mem - wait for BGC to finish, wait reason: %d", awr)); wait_for_background (awr, loh_p); } } return wait_p; } #endif //BACKGROUND_GC // We request to trigger an ephemeral GC but we may get a full compacting GC. // return TRUE if that's the case. 
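// A full compacting GC can happen instead, either because the requested gen1 collection
// was escalated or because another thread triggered one while this thread waited; callers
// use the return value to pick their next allocation state.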
BOOL gc_heap::trigger_ephemeral_gc (gc_reason gr) { #ifdef BACKGROUND_GC wait_for_bgc_high_memory (awr_loh_oos_bgc, false); #endif //BACKGROUND_GC BOOL did_full_compact_gc = FALSE; dprintf (1, ("h%d triggering a gen1 GC", heap_number)); size_t last_full_compact_gc_count = get_full_compact_gc_count(); vm_heap->GarbageCollectGeneration(max_generation - 1, gr); #ifdef MULTIPLE_HEAPS enter_spin_lock (&more_space_lock_soh); add_saved_spinlock_info (false, me_acquire, mt_t_eph_gc); #endif //MULTIPLE_HEAPS size_t current_full_compact_gc_count = get_full_compact_gc_count(); if (current_full_compact_gc_count > last_full_compact_gc_count) { dprintf (2, ("attempted to trigger an ephemeral GC and got a full compacting GC")); did_full_compact_gc = TRUE; } return did_full_compact_gc; } BOOL gc_heap::soh_try_fit (int gen_number, size_t size, alloc_context* acontext, uint32_t flags, int align_const, BOOL* commit_failed_p, BOOL* short_seg_end_p) { BOOL can_allocate = TRUE; if (short_seg_end_p) { *short_seg_end_p = FALSE; } can_allocate = a_fit_free_list_p (gen_number, size, acontext, flags, align_const); if (!can_allocate) { if (short_seg_end_p) { *short_seg_end_p = short_on_end_of_seg (ephemeral_heap_segment); } // If the caller doesn't care, we always try to fit at the end of seg; // otherwise we would only try if we are actually not short at end of seg. if (!short_seg_end_p || !(*short_seg_end_p)) { #ifdef USE_REGIONS while (ephemeral_heap_segment) #endif //USE_REGIONS { can_allocate = a_fit_segment_end_p (gen_number, ephemeral_heap_segment, size, acontext, flags, align_const, commit_failed_p); #ifdef USE_REGIONS if (can_allocate) { break; } dprintf (REGIONS_LOG, ("h%d fixing region %Ix end to alloc ptr: %Ix, alloc_allocated %Ix", heap_number, heap_segment_mem (ephemeral_heap_segment), acontext->alloc_ptr, alloc_allocated)); fix_allocation_context (acontext, TRUE, FALSE); fix_youngest_allocation_area(); heap_segment* next_seg = heap_segment_next (ephemeral_heap_segment); bool new_seg = false; if (!next_seg) { assert (ephemeral_heap_segment == generation_tail_region (generation_of (gen_number))); next_seg = get_new_region (gen_number); new_seg = true; } if (next_seg) { dprintf (REGIONS_LOG, ("eph seg %Ix -> next %Ix", heap_segment_mem (ephemeral_heap_segment), heap_segment_mem (next_seg))); ephemeral_heap_segment = next_seg; if (new_seg) { GCToEEInterface::DiagAddNewRegion( heap_segment_gen_num (next_seg), heap_segment_mem (next_seg), heap_segment_allocated (next_seg), heap_segment_reserved (next_seg) ); } } else { *commit_failed_p = TRUE; dprintf (REGIONS_LOG, ("couldn't get a new ephemeral region")); return FALSE; } alloc_allocated = heap_segment_allocated (ephemeral_heap_segment); dprintf (REGIONS_LOG, ("h%d alloc_allocated is now %Ix", heap_number, alloc_allocated)); #endif //USE_REGIONS } } } return can_allocate; } allocation_state gc_heap::allocate_soh (int gen_number, size_t size, alloc_context* acontext, uint32_t flags, int align_const) { #if defined (BACKGROUND_GC) && !defined (MULTIPLE_HEAPS) if (gc_heap::background_running_p()) { background_soh_alloc_count++; if ((background_soh_alloc_count % bgc_alloc_spin_count) == 0) { add_saved_spinlock_info (false, me_release, mt_alloc_small); leave_spin_lock (&more_space_lock_soh); bool cooperative_mode = enable_preemptive(); GCToOSInterface::Sleep (bgc_alloc_spin); disable_preemptive (cooperative_mode); enter_spin_lock (&more_space_lock_soh); add_saved_spinlock_info (false, me_acquire, mt_alloc_small); } else { //GCToOSInterface::YieldThread (0); } } 
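// The block above only paces SOH allocations on single-heap builds while a background GC
// is running: every bgc_alloc_spin_count allocations it drops the more space lock and
// sleeps briefly so the background GC thread can make progress.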
#endif //BACKGROUND_GC && !MULTIPLE_HEAPS gc_reason gr = reason_oos_soh; oom_reason oom_r = oom_no_failure; // No variable values should be "carried over" from one state to the other. // That's why there are local variable for each state allocation_state soh_alloc_state = a_state_start; // If we can get a new seg it means allocation will succeed. while (1) { dprintf (3, ("[h%d]soh state is %s", heap_number, allocation_state_str[soh_alloc_state])); switch (soh_alloc_state) { case a_state_can_allocate: case a_state_cant_allocate: { goto exit; } case a_state_start: { soh_alloc_state = a_state_try_fit; break; } case a_state_try_fit: { BOOL commit_failed_p = FALSE; BOOL can_use_existing_p = FALSE; can_use_existing_p = soh_try_fit (gen_number, size, acontext, flags, align_const, &commit_failed_p, NULL); soh_alloc_state = (can_use_existing_p ? a_state_can_allocate : (commit_failed_p ? a_state_trigger_full_compact_gc : a_state_trigger_ephemeral_gc)); break; } case a_state_try_fit_after_bgc: { BOOL commit_failed_p = FALSE; BOOL can_use_existing_p = FALSE; BOOL short_seg_end_p = FALSE; can_use_existing_p = soh_try_fit (gen_number, size, acontext, flags, align_const, &commit_failed_p, &short_seg_end_p); soh_alloc_state = (can_use_existing_p ? a_state_can_allocate : (short_seg_end_p ? a_state_trigger_2nd_ephemeral_gc : a_state_trigger_full_compact_gc)); break; } case a_state_try_fit_after_cg: { BOOL commit_failed_p = FALSE; BOOL can_use_existing_p = FALSE; BOOL short_seg_end_p = FALSE; can_use_existing_p = soh_try_fit (gen_number, size, acontext, flags, align_const, &commit_failed_p, &short_seg_end_p); if (can_use_existing_p) { soh_alloc_state = a_state_can_allocate; } #ifdef MULTIPLE_HEAPS else if (gen0_allocated_after_gc_p) { // some other threads already grabbed the more space lock and allocated // so we should attempt an ephemeral GC again. soh_alloc_state = a_state_trigger_ephemeral_gc; } #endif //MULTIPLE_HEAPS else if (short_seg_end_p) { soh_alloc_state = a_state_cant_allocate; oom_r = oom_budget; } else { assert (commit_failed_p || heap_hard_limit); soh_alloc_state = a_state_cant_allocate; oom_r = oom_cant_commit; } break; } case a_state_check_and_wait_for_bgc: { BOOL bgc_in_progress_p = FALSE; BOOL did_full_compacting_gc = FALSE; bgc_in_progress_p = check_and_wait_for_bgc (awr_gen0_oos_bgc, &did_full_compacting_gc, false); soh_alloc_state = (did_full_compacting_gc ? a_state_try_fit_after_cg : a_state_try_fit_after_bgc); break; } case a_state_trigger_ephemeral_gc: { BOOL commit_failed_p = FALSE; BOOL can_use_existing_p = FALSE; BOOL short_seg_end_p = FALSE; BOOL bgc_in_progress_p = FALSE; BOOL did_full_compacting_gc = FALSE; did_full_compacting_gc = trigger_ephemeral_gc (gr); if (did_full_compacting_gc) { soh_alloc_state = a_state_try_fit_after_cg; } else { can_use_existing_p = soh_try_fit (gen_number, size, acontext, flags, align_const, &commit_failed_p, &short_seg_end_p); #ifdef BACKGROUND_GC bgc_in_progress_p = gc_heap::background_running_p(); #endif //BACKGROUND_GC if (can_use_existing_p) { soh_alloc_state = a_state_can_allocate; } else { if (short_seg_end_p) { if (should_expand_in_full_gc) { dprintf (2, ("gen1 GC wanted to expand!")); soh_alloc_state = a_state_trigger_full_compact_gc; } else { soh_alloc_state = (bgc_in_progress_p ? 
a_state_check_and_wait_for_bgc : a_state_trigger_full_compact_gc); } } else if (commit_failed_p) { soh_alloc_state = a_state_trigger_full_compact_gc; } else { #ifdef MULTIPLE_HEAPS // some other threads already grabbed the more space lock and allocated // so we should attempt an ephemeral GC again. assert (gen0_allocated_after_gc_p); soh_alloc_state = a_state_trigger_ephemeral_gc; #else //MULTIPLE_HEAPS assert (!"shouldn't get here"); #endif //MULTIPLE_HEAPS } } } break; } case a_state_trigger_2nd_ephemeral_gc: { BOOL commit_failed_p = FALSE; BOOL can_use_existing_p = FALSE; BOOL short_seg_end_p = FALSE; BOOL did_full_compacting_gc = FALSE; did_full_compacting_gc = trigger_ephemeral_gc (gr); if (did_full_compacting_gc) { soh_alloc_state = a_state_try_fit_after_cg; } else { can_use_existing_p = soh_try_fit (gen_number, size, acontext, flags, align_const, &commit_failed_p, &short_seg_end_p); if (short_seg_end_p || commit_failed_p) { soh_alloc_state = a_state_trigger_full_compact_gc; } else { assert (can_use_existing_p); soh_alloc_state = a_state_can_allocate; } } break; } case a_state_trigger_full_compact_gc: { if (fgn_maxgen_percent) { dprintf (2, ("FGN: SOH doing last GC before we throw OOM")); send_full_gc_notification (max_generation, FALSE); } BOOL got_full_compacting_gc = FALSE; got_full_compacting_gc = trigger_full_compact_gc (gr, &oom_r, false); soh_alloc_state = (got_full_compacting_gc ? a_state_try_fit_after_cg : a_state_cant_allocate); break; } default: { assert (!"Invalid state!"); break; } } } exit: if (soh_alloc_state == a_state_cant_allocate) { assert (oom_r != oom_no_failure); handle_oom (oom_r, size, heap_segment_allocated (ephemeral_heap_segment), heap_segment_reserved (ephemeral_heap_segment)); add_saved_spinlock_info (false, me_release, mt_alloc_small_cant); leave_spin_lock (&more_space_lock_soh); } assert ((soh_alloc_state == a_state_can_allocate) || (soh_alloc_state == a_state_cant_allocate) || (soh_alloc_state == a_state_retry_allocate)); return soh_alloc_state; } #ifdef BACKGROUND_GC inline void gc_heap::bgc_track_uoh_alloc() { if (current_c_gc_state == c_gc_state_planning) { Interlocked::Increment (&uoh_alloc_thread_count); dprintf (3, ("h%d: inc lc: %d", heap_number, (int32_t)uoh_alloc_thread_count)); } } inline void gc_heap::bgc_untrack_uoh_alloc() { if (current_c_gc_state == c_gc_state_planning) { Interlocked::Decrement (&uoh_alloc_thread_count); dprintf (3, ("h%d: dec lc: %d", heap_number, (int32_t)uoh_alloc_thread_count)); } } int bgc_allocate_spin(size_t min_gc_size, size_t bgc_begin_size, size_t bgc_size_increased, size_t end_size) { if ((bgc_begin_size + bgc_size_increased) < (min_gc_size * 10)) { // just do it, no spinning return 0; } if ((bgc_begin_size >= (2 * end_size)) || (bgc_size_increased >= bgc_begin_size)) { if (bgc_begin_size >= (2 * end_size)) { dprintf (3, ("alloc-ed too much before bgc started")); } else { dprintf (3, ("alloc-ed too much after bgc started")); } // -1 means wait for bgc return -1; } else { return (int)(((float)bgc_size_increased / (float)bgc_begin_size) * 10); } } int gc_heap::bgc_loh_allocate_spin() { size_t min_gc_size = dd_min_size (dynamic_data_of (loh_generation)); size_t bgc_begin_size = bgc_begin_loh_size; size_t bgc_size_increased = bgc_loh_size_increased; size_t end_size = end_loh_size; return bgc_allocate_spin(min_gc_size, bgc_begin_size, bgc_size_increased, end_size); } int gc_heap::bgc_poh_allocate_spin() { size_t min_gc_size = dd_min_size (dynamic_data_of (poh_generation)); size_t bgc_begin_size = bgc_begin_poh_size; 
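// As for LOH, these inputs (POH size when the background GC started, its growth since
// then, and the recorded end_poh_size) feed bgc_allocate_spin, which returns 0 for no
// pacing, -1 to wait for the BGC, or a yield count; as a rough illustration, ~30% growth
// since the BGC began maps to about 3 yields.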
size_t bgc_size_increased = bgc_poh_size_increased; size_t end_size = end_poh_size; return bgc_allocate_spin(min_gc_size, bgc_begin_size, bgc_size_increased, end_size); } #endif //BACKGROUND_GC size_t gc_heap::get_uoh_seg_size (size_t size) { size_t default_seg_size = #ifdef USE_REGIONS global_region_allocator.get_large_region_alignment(); #else min_uoh_segment_size; #endif //USE_REGIONS size_t align_size = default_seg_size; int align_const = get_alignment_constant (FALSE); size_t large_seg_size = align_on_page ( max (default_seg_size, ((size + 2 * Align(min_obj_size, align_const) + OS_PAGE_SIZE + align_size) / align_size * align_size))); return large_seg_size; } BOOL gc_heap::uoh_get_new_seg (int gen_number, size_t size, BOOL* did_full_compact_gc, oom_reason* oom_r) { *did_full_compact_gc = FALSE; size_t seg_size = get_uoh_seg_size (size); heap_segment* new_seg = get_uoh_segment (gen_number, seg_size, did_full_compact_gc); if (new_seg && (gen_number == loh_generation)) { loh_alloc_since_cg += seg_size; } else { *oom_r = oom_loh; } return (new_seg != 0); } // PERF TODO: this is too aggressive; and in hard limit we should // count the actual allocated bytes instead of only updating it during // getting a new seg. BOOL gc_heap::retry_full_compact_gc (size_t size) { size_t seg_size = get_uoh_seg_size (size); if (loh_alloc_since_cg >= (2 * (uint64_t)seg_size)) { return TRUE; } #ifdef MULTIPLE_HEAPS uint64_t total_alloc_size = 0; for (int i = 0; i < n_heaps; i++) { total_alloc_size += g_heaps[i]->loh_alloc_since_cg; } if (total_alloc_size >= (2 * (uint64_t)seg_size)) { return TRUE; } #endif //MULTIPLE_HEAPS return FALSE; } BOOL gc_heap::check_and_wait_for_bgc (alloc_wait_reason awr, BOOL* did_full_compact_gc, bool loh_p) { BOOL bgc_in_progress = FALSE; *did_full_compact_gc = FALSE; #ifdef BACKGROUND_GC if (gc_heap::background_running_p()) { bgc_in_progress = TRUE; size_t last_full_compact_gc_count = get_full_compact_gc_count(); wait_for_background (awr, loh_p); size_t current_full_compact_gc_count = get_full_compact_gc_count(); if (current_full_compact_gc_count > last_full_compact_gc_count) { *did_full_compact_gc = TRUE; } } #endif //BACKGROUND_GC return bgc_in_progress; } BOOL gc_heap::uoh_try_fit (int gen_number, size_t size, alloc_context* acontext, uint32_t flags, int align_const, BOOL* commit_failed_p, oom_reason* oom_r) { BOOL can_allocate = TRUE; if (!a_fit_free_list_uoh_p (size, acontext, flags, align_const, gen_number)) { can_allocate = uoh_a_fit_segment_end_p (gen_number, size, acontext, flags, align_const, commit_failed_p, oom_r); #ifdef BACKGROUND_GC if (can_allocate && gc_heap::background_running_p()) { if (gen_number == poh_generation) { bgc_poh_size_increased += size; } else { bgc_loh_size_increased += size; } } #endif //BACKGROUND_GC } return can_allocate; } BOOL gc_heap::trigger_full_compact_gc (gc_reason gr, oom_reason* oom_r, bool loh_p) { BOOL did_full_compact_gc = FALSE; size_t last_full_compact_gc_count = get_full_compact_gc_count(); // Set this so the next GC will be a full compacting GC. if (!last_gc_before_oom) { last_gc_before_oom = TRUE; } #ifdef BACKGROUND_GC if (gc_heap::background_running_p()) { wait_for_background (((gr == reason_oos_soh) ? awr_gen0_oos_bgc : awr_loh_oos_bgc), loh_p); dprintf (2, ("waited for BGC - done")); } #endif //BACKGROUND_GC GCSpinLock* msl = loh_p ? 
&more_space_lock_uoh : &more_space_lock_soh; size_t current_full_compact_gc_count = get_full_compact_gc_count(); if (current_full_compact_gc_count > last_full_compact_gc_count) { dprintf (3, ("a full compacting GC triggered while waiting for BGC (%d->%d)", last_full_compact_gc_count, current_full_compact_gc_count)); assert (current_full_compact_gc_count > last_full_compact_gc_count); did_full_compact_gc = TRUE; goto exit; } dprintf (3, ("h%d full GC", heap_number)); trigger_gc_for_alloc (max_generation, gr, msl, loh_p, mt_t_full_gc); current_full_compact_gc_count = get_full_compact_gc_count(); if (current_full_compact_gc_count == last_full_compact_gc_count) { dprintf (2, ("attempted to trigger a full compacting GC but didn't get it")); // We requested a full GC but didn't get because of the elevation logic // which means we should fail. *oom_r = oom_unproductive_full_gc; } else { dprintf (3, ("h%d: T full compacting GC (%d->%d)", heap_number, last_full_compact_gc_count, current_full_compact_gc_count)); assert (current_full_compact_gc_count > last_full_compact_gc_count); did_full_compact_gc = TRUE; } exit: return did_full_compact_gc; } #ifdef RECORD_LOH_STATE void gc_heap::add_saved_loh_state (allocation_state loh_state_to_save, EEThreadId thread_id) { // When the state is can_allocate we already have released the more // space lock. So we are not logging states here since this code // is not thread safe. if (loh_state_to_save != a_state_can_allocate) { last_loh_states[loh_state_index].alloc_state = loh_state_to_save; last_loh_states[loh_state_index].thread_id = thread_id; loh_state_index++; if (loh_state_index == max_saved_loh_states) { loh_state_index = 0; } assert (loh_state_index < max_saved_loh_states); } } #endif //RECORD_LOH_STATE bool gc_heap::should_retry_other_heap (int gen_number, size_t size) { #ifdef MULTIPLE_HEAPS if (heap_hard_limit) { size_t min_size = dd_min_size (g_heaps[0]->dynamic_data_of (gen_number)); size_t slack_space = max (commit_min_th, min_size); bool retry_p = ((current_total_committed + size) < (heap_hard_limit - slack_space)); dprintf (1, ("%Id - %Id - total committed %Id - size %Id = %Id, %s", heap_hard_limit, slack_space, current_total_committed, size, (heap_hard_limit - slack_space - current_total_committed - size), (retry_p ? "retry" : "no retry"))); return retry_p; } else #endif //MULTIPLE_HEAPS { return false; } } allocation_state gc_heap::allocate_uoh (int gen_number, size_t size, alloc_context* acontext, uint32_t flags, int align_const) { #ifdef BACKGROUND_GC if (gc_heap::background_running_p()) { #ifdef BGC_SERVO_TUNING bool planning_p = (current_c_gc_state == c_gc_state_planning); #endif //BGC_SERVO_TUNING background_uoh_alloc_count++; //if ((background_loh_alloc_count % bgc_alloc_spin_count_loh) == 0) { #ifdef BGC_SERVO_TUNING if (planning_p) { loh_a_bgc_planning += size; } else { loh_a_bgc_marking += size; } #endif //BGC_SERVO_TUNING int spin_for_allocation = (gen_number == loh_generation) ? 
bgc_loh_allocate_spin() : bgc_poh_allocate_spin(); if (spin_for_allocation > 0) { add_saved_spinlock_info (true, me_release, mt_alloc_large); leave_spin_lock (&more_space_lock_uoh); bool cooperative_mode = enable_preemptive(); GCToOSInterface::YieldThread (spin_for_allocation); disable_preemptive (cooperative_mode); enter_spin_lock (&more_space_lock_uoh); add_saved_spinlock_info (true, me_acquire, mt_alloc_large); dprintf (SPINLOCK_LOG, ("[%d]spin Emsl uoh", heap_number)); } else if (spin_for_allocation < 0) { wait_for_background (awr_uoh_alloc_during_bgc, true); } } } #ifdef BGC_SERVO_TUNING else { loh_a_no_bgc += size; } #endif //BGC_SERVO_TUNING #endif //BACKGROUND_GC gc_reason gr = reason_oos_loh; generation* gen = generation_of (gen_number); oom_reason oom_r = oom_no_failure; size_t current_full_compact_gc_count = 0; // No variable values should be "carried over" from one state to the other. // That's why there are local variable for each state allocation_state uoh_alloc_state = a_state_start; #ifdef RECORD_LOH_STATE EEThreadId current_thread_id; current_thread_id.SetToCurrentThread(); #endif //RECORD_LOH_STATE // If we can get a new seg it means allocation will succeed. while (1) { dprintf (3, ("[h%d]loh state is %s", heap_number, allocation_state_str[uoh_alloc_state])); #ifdef RECORD_LOH_STATE add_saved_loh_state (loh_uoh_alloc_state, current_thread_id); #endif //RECORD_LOH_STATE switch (uoh_alloc_state) { case a_state_can_allocate: case a_state_cant_allocate: { goto exit; } case a_state_start: { uoh_alloc_state = a_state_try_fit; break; } case a_state_try_fit: { BOOL commit_failed_p = FALSE; BOOL can_use_existing_p = FALSE; can_use_existing_p = uoh_try_fit (gen_number, size, acontext, flags, align_const, &commit_failed_p, &oom_r); uoh_alloc_state = (can_use_existing_p ? a_state_can_allocate : (commit_failed_p ? a_state_trigger_full_compact_gc : a_state_acquire_seg)); assert ((uoh_alloc_state == a_state_can_allocate) == (acontext->alloc_ptr != 0)); break; } case a_state_try_fit_new_seg: { BOOL commit_failed_p = FALSE; BOOL can_use_existing_p = FALSE; can_use_existing_p = uoh_try_fit (gen_number, size, acontext, flags, align_const, &commit_failed_p, &oom_r); // Even after we got a new seg it doesn't necessarily mean we can allocate, // another LOH allocating thread could have beat us to acquire the msl so // we need to try again. uoh_alloc_state = (can_use_existing_p ? a_state_can_allocate : a_state_try_fit); assert ((uoh_alloc_state == a_state_can_allocate) == (acontext->alloc_ptr != 0)); break; } case a_state_try_fit_after_cg: { BOOL commit_failed_p = FALSE; BOOL can_use_existing_p = FALSE; can_use_existing_p = uoh_try_fit (gen_number, size, acontext, flags, align_const, &commit_failed_p, &oom_r); // If we failed to commit, we bail right away 'cause we already did a // full compacting GC. uoh_alloc_state = (can_use_existing_p ? a_state_can_allocate : (commit_failed_p ? a_state_cant_allocate : a_state_acquire_seg_after_cg)); assert ((uoh_alloc_state == a_state_can_allocate) == (acontext->alloc_ptr != 0)); break; } case a_state_try_fit_after_bgc: { BOOL commit_failed_p = FALSE; BOOL can_use_existing_p = FALSE; can_use_existing_p = uoh_try_fit (gen_number, size, acontext, flags, align_const, &commit_failed_p, &oom_r); uoh_alloc_state = (can_use_existing_p ? a_state_can_allocate : (commit_failed_p ? 
a_state_trigger_full_compact_gc : a_state_acquire_seg_after_bgc)); assert ((uoh_alloc_state == a_state_can_allocate) == (acontext->alloc_ptr != 0)); break; } case a_state_acquire_seg: { BOOL can_get_new_seg_p = FALSE; BOOL did_full_compacting_gc = FALSE; current_full_compact_gc_count = get_full_compact_gc_count(); can_get_new_seg_p = uoh_get_new_seg (gen_number, size, &did_full_compacting_gc, &oom_r); uoh_alloc_state = (can_get_new_seg_p ? a_state_try_fit_new_seg : (did_full_compacting_gc ? a_state_check_retry_seg : a_state_check_and_wait_for_bgc)); break; } case a_state_acquire_seg_after_cg: { BOOL can_get_new_seg_p = FALSE; BOOL did_full_compacting_gc = FALSE; current_full_compact_gc_count = get_full_compact_gc_count(); can_get_new_seg_p = uoh_get_new_seg (gen_number, size, &did_full_compacting_gc, &oom_r); // Since we release the msl before we try to allocate a seg, other // threads could have allocated a bunch of segments before us so // we might need to retry. uoh_alloc_state = (can_get_new_seg_p ? a_state_try_fit_after_cg : a_state_check_retry_seg); break; } case a_state_acquire_seg_after_bgc: { BOOL can_get_new_seg_p = FALSE; BOOL did_full_compacting_gc = FALSE; current_full_compact_gc_count = get_full_compact_gc_count(); can_get_new_seg_p = uoh_get_new_seg (gen_number, size, &did_full_compacting_gc, &oom_r); uoh_alloc_state = (can_get_new_seg_p ? a_state_try_fit_new_seg : (did_full_compacting_gc ? a_state_check_retry_seg : a_state_trigger_full_compact_gc)); assert ((uoh_alloc_state != a_state_cant_allocate) || (oom_r != oom_no_failure)); break; } case a_state_check_and_wait_for_bgc: { BOOL bgc_in_progress_p = FALSE; BOOL did_full_compacting_gc = FALSE; bgc_in_progress_p = check_and_wait_for_bgc (awr_loh_oos_bgc, &did_full_compacting_gc, true); uoh_alloc_state = (!bgc_in_progress_p ? a_state_trigger_full_compact_gc : (did_full_compacting_gc ? a_state_try_fit_after_cg : a_state_try_fit_after_bgc)); break; } case a_state_trigger_full_compact_gc: { if (fgn_maxgen_percent) { dprintf (2, ("FGN: LOH doing last GC before we throw OOM")); send_full_gc_notification (max_generation, FALSE); } BOOL got_full_compacting_gc = FALSE; got_full_compacting_gc = trigger_full_compact_gc (gr, &oom_r, true); uoh_alloc_state = (got_full_compacting_gc ? a_state_try_fit_after_cg : a_state_cant_allocate); assert ((uoh_alloc_state != a_state_cant_allocate) || (oom_r != oom_no_failure)); break; } case a_state_check_retry_seg: { BOOL should_retry_gc = retry_full_compact_gc (size); BOOL should_retry_get_seg = FALSE; if (!should_retry_gc) { size_t last_full_compact_gc_count = current_full_compact_gc_count; current_full_compact_gc_count = get_full_compact_gc_count(); if (current_full_compact_gc_count > last_full_compact_gc_count) { should_retry_get_seg = TRUE; } } uoh_alloc_state = (should_retry_gc ? a_state_trigger_full_compact_gc : (should_retry_get_seg ? 
a_state_try_fit_after_cg : a_state_cant_allocate)); assert ((uoh_alloc_state != a_state_cant_allocate) || (oom_r != oom_no_failure)); break; } default: { assert (!"Invalid state!"); break; } } } exit: if (uoh_alloc_state == a_state_cant_allocate) { assert (oom_r != oom_no_failure); if ((oom_r != oom_cant_commit) && should_retry_other_heap (gen_number, size)) { uoh_alloc_state = a_state_retry_allocate; } else { handle_oom (oom_r, size, 0, 0); } add_saved_spinlock_info (true, me_release, mt_alloc_large_cant); leave_spin_lock (&more_space_lock_uoh); } assert ((uoh_alloc_state == a_state_can_allocate) || (uoh_alloc_state == a_state_cant_allocate) || (uoh_alloc_state == a_state_retry_allocate)); return uoh_alloc_state; } // BGC's final mark phase will acquire the msl, so release it here and re-acquire. void gc_heap::trigger_gc_for_alloc (int gen_number, gc_reason gr, GCSpinLock* msl, bool loh_p, msl_take_state take_state) { #ifdef BACKGROUND_GC if (loh_p) { add_saved_spinlock_info (loh_p, me_release, take_state); leave_spin_lock (msl); } #endif //BACKGROUND_GC vm_heap->GarbageCollectGeneration (gen_number, gr); #ifdef MULTIPLE_HEAPS if (!loh_p) { enter_spin_lock (msl); add_saved_spinlock_info (loh_p, me_acquire, take_state); } #endif //MULTIPLE_HEAPS #ifdef BACKGROUND_GC if (loh_p) { enter_spin_lock (msl); add_saved_spinlock_info (loh_p, me_acquire, take_state); } #endif //BACKGROUND_GC } inline bool gc_heap::update_alloc_info (int gen_number, size_t allocated_size, size_t* etw_allocation_amount) { bool exceeded_p = false; int oh_index = gen_to_oh (gen_number); allocated_since_last_gc[oh_index] += allocated_size; size_t& etw_allocated = etw_allocation_running_amount[oh_index]; etw_allocated += allocated_size; if (etw_allocated > etw_allocation_tick) { *etw_allocation_amount = etw_allocated; exceeded_p = true; etw_allocated = 0; } return exceeded_p; } allocation_state gc_heap::try_allocate_more_space (alloc_context* acontext, size_t size, uint32_t flags, int gen_number) { if (gc_heap::gc_started) { wait_for_gc_done(); return a_state_retry_allocate; } bool loh_p = (gen_number > 0); GCSpinLock* msl = loh_p ? &more_space_lock_uoh : &more_space_lock_soh; #ifdef SYNCHRONIZATION_STATS int64_t msl_acquire_start = GCToOSInterface::QueryPerformanceCounter(); #endif //SYNCHRONIZATION_STATS enter_spin_lock (msl); add_saved_spinlock_info (loh_p, me_acquire, mt_try_alloc); dprintf (SPINLOCK_LOG, ("[%d]Emsl for alloc", heap_number)); #ifdef SYNCHRONIZATION_STATS int64_t msl_acquire = GCToOSInterface::QueryPerformanceCounter() - msl_acquire_start; total_msl_acquire += msl_acquire; num_msl_acquired++; if (msl_acquire > 200) { num_high_msl_acquire++; } else { num_low_msl_acquire++; } #endif //SYNCHRONIZATION_STATS /* // We are commenting this out 'cause we don't see the point - we already // have checked gc_started when we were acquiring the msl - no need to check // again. This complicates the logic in bgc_suspend_EE 'cause that one would // need to release msl which causes all sorts of trouble. 
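// The disabled block below preserves the original early-rendezvous path, which re-checked
// gc_started after taking the more space lock and waited for the in-progress GC to finish.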
if (gc_heap::gc_started) { #ifdef SYNCHRONIZATION_STATS good_suspension++; #endif //SYNCHRONIZATION_STATS BOOL fStress = (g_pConfig->GetGCStressLevel() & GCConfig::GCSTRESS_TRANSITION) != 0; if (!fStress) { //Rendez vous early (MP scaling issue) //dprintf (1, ("[%d]waiting for gc", heap_number)); wait_for_gc_done(); #ifdef MULTIPLE_HEAPS return -1; #endif //MULTIPLE_HEAPS } } */ dprintf (3, ("requested to allocate %d bytes on gen%d", size, gen_number)); int align_const = get_alignment_constant (gen_number <= max_generation); if (fgn_maxgen_percent) { check_for_full_gc (gen_number, size); } #ifdef BGC_SERVO_TUNING if ((gen_number != 0) && bgc_tuning::should_trigger_bgc_loh()) { trigger_gc_for_alloc (max_generation, reason_bgc_tuning_loh, msl, loh_p, mt_try_servo_budget); } else #endif //BGC_SERVO_TUNING { bool trigger_on_budget_loh_p = #ifdef BGC_SERVO_TUNING !bgc_tuning::enable_fl_tuning; #else true; #endif //BGC_SERVO_TUNING bool check_budget_p = true; if (gen_number != 0) { check_budget_p = trigger_on_budget_loh_p; } if (check_budget_p && !(new_allocation_allowed (gen_number))) { if (fgn_maxgen_percent && (gen_number == 0)) { // We only check gen0 every so often, so take this opportunity to check again. check_for_full_gc (gen_number, size); } #ifdef BACKGROUND_GC bool recheck_p = wait_for_bgc_high_memory (awr_gen0_alloc, loh_p); #endif //BACKGROUND_GC #ifdef SYNCHRONIZATION_STATS bad_suspension++; #endif //SYNCHRONIZATION_STATS dprintf (2, ("h%d running out of budget on gen%d, gc", heap_number, gen_number)); #ifdef BACKGROUND_GC bool trigger_gc_p = true; if (recheck_p) trigger_gc_p = !(new_allocation_allowed (gen_number)); if (trigger_gc_p) #endif //BACKGROUND_GC { if (!settings.concurrent || (gen_number == 0)) { trigger_gc_for_alloc (0, ((gen_number == 0) ? reason_alloc_soh : reason_alloc_loh), msl, loh_p, mt_try_budget); } } } } allocation_state can_allocate = ((gen_number == 0) ? 
allocate_soh (gen_number, size, acontext, flags, align_const) : allocate_uoh (gen_number, size, acontext, flags, align_const)); return can_allocate; } #ifdef MULTIPLE_HEAPS void gc_heap::balance_heaps (alloc_context* acontext) { if (acontext->alloc_count < 4) { if (acontext->alloc_count == 0) { int home_hp_num = heap_select::select_heap (acontext); acontext->set_home_heap (GCHeap::GetHeap (home_hp_num)); gc_heap* hp = acontext->get_home_heap ()->pGenGCHeap; acontext->set_alloc_heap (acontext->get_home_heap ()); hp->alloc_context_count++; #ifdef HEAP_BALANCE_INSTRUMENTATION uint16_t ideal_proc_no = 0; GCToOSInterface::GetCurrentThreadIdealProc (&ideal_proc_no); uint32_t proc_no = GCToOSInterface::GetCurrentProcessorNumber (); add_to_hb_numa (proc_no, ideal_proc_no, home_hp_num, false, true, false); dprintf (HEAP_BALANCE_TEMP_LOG, ("TEMPafter GC: 1st alloc on p%3d, h%d, ip: %d", proc_no, home_hp_num, ideal_proc_no)); #endif //HEAP_BALANCE_INSTRUMENTATION } } else { BOOL set_home_heap = FALSE; gc_heap* home_hp = NULL; int proc_hp_num = 0; #ifdef HEAP_BALANCE_INSTRUMENTATION bool alloc_count_p = true; bool multiple_procs_p = false; bool set_ideal_p = false; uint32_t proc_no = GCToOSInterface::GetCurrentProcessorNumber (); uint32_t last_proc_no = proc_no; #endif //HEAP_BALANCE_INSTRUMENTATION if (heap_select::can_find_heap_fast ()) { assert (acontext->get_home_heap () != NULL); home_hp = acontext->get_home_heap ()->pGenGCHeap; proc_hp_num = heap_select::select_heap (acontext); if (home_hp != gc_heap::g_heaps[proc_hp_num]) { #ifdef HEAP_BALANCE_INSTRUMENTATION alloc_count_p = false; #endif //HEAP_BALANCE_INSTRUMENTATION set_home_heap = TRUE; } else if ((acontext->alloc_count & 15) == 0) set_home_heap = TRUE; } else { if ((acontext->alloc_count & 3) == 0) set_home_heap = TRUE; } if (set_home_heap) { /* // Since we are balancing up to MAX_SUPPORTED_CPUS, no need for this. if (n_heaps > MAX_SUPPORTED_CPUS) { // on machines with many processors cache affinity is really king, so don't even try // to balance on these. acontext->home_heap = GCHeap::GetHeap( heap_select::select_heap(acontext)); acontext->alloc_heap = acontext->home_heap; } else */ { gc_heap* org_hp = acontext->get_alloc_heap ()->pGenGCHeap; int org_hp_num = org_hp->heap_number; int final_alloc_hp_num = org_hp_num; dynamic_data* dd = org_hp->dynamic_data_of (0); ptrdiff_t org_size = dd_new_allocation (dd); ptrdiff_t total_size = (ptrdiff_t)dd_desired_allocation (dd); #ifdef HEAP_BALANCE_INSTRUMENTATION dprintf (HEAP_BALANCE_TEMP_LOG, ("TEMP[p%3d] ph h%3d, hh: %3d, ah: %3d (%dmb-%dmb), ac: %5d(%s)", proc_no, proc_hp_num, home_hp->heap_number, org_hp_num, (total_size / 1024 / 1024), (org_size / 1024 / 1024), acontext->alloc_count, ((proc_hp_num == home_hp->heap_number) ? 
"AC" : "H"))); #endif //HEAP_BALANCE_INSTRUMENTATION int org_alloc_context_count; int max_alloc_context_count; gc_heap* max_hp; int max_hp_num = 0; ptrdiff_t max_size; size_t local_delta = max (((size_t)org_size >> 6), min_gen0_balance_delta); size_t delta = local_delta; if (((size_t)org_size + 2 * delta) >= (size_t)total_size) { acontext->alloc_count++; return; } #ifdef HEAP_BALANCE_INSTRUMENTATION proc_no = GCToOSInterface::GetCurrentProcessorNumber (); if (proc_no != last_proc_no) { dprintf (HEAP_BALANCE_TEMP_LOG, ("TEMPSP: %d->%d", last_proc_no, proc_no)); multiple_procs_p = true; last_proc_no = proc_no; } int new_home_hp_num = heap_select::proc_no_to_heap_no[proc_no]; #else int new_home_hp_num = heap_select::select_heap(acontext); #endif //HEAP_BALANCE_INSTRUMENTATION gc_heap* new_home_hp = gc_heap::g_heaps[new_home_hp_num]; acontext->set_home_heap (new_home_hp->vm_heap); int start, end, finish; heap_select::get_heap_range_for_heap (new_home_hp_num, &start, &end); finish = start + n_heaps; do { max_hp = org_hp; max_hp_num = org_hp_num; max_size = org_size + delta; org_alloc_context_count = org_hp->alloc_context_count; max_alloc_context_count = org_alloc_context_count; if (org_hp == new_home_hp) max_size = max_size + delta; if (max_alloc_context_count > 1) max_size /= max_alloc_context_count; // check if the new home heap has more space if (org_hp != new_home_hp) { dd = new_home_hp->dynamic_data_of(0); ptrdiff_t size = dd_new_allocation(dd); // favor new home heap over org heap size += delta * 2; int new_home_hp_alloc_context_count = new_home_hp->alloc_context_count; if (new_home_hp_alloc_context_count > 0) size /= (new_home_hp_alloc_context_count + 1); if (size > max_size) { #ifdef HEAP_BALANCE_INSTRUMENTATION dprintf(HEAP_BALANCE_TEMP_LOG, ("TEMPorg h%d(%dmb), m h%d(%dmb)", org_hp_num, (max_size / 1024 / 1024), new_home_hp_num, (size / 1024 / 1024))); #endif //HEAP_BALANCE_INSTRUMENTATION max_hp = new_home_hp; max_size = size; max_hp_num = new_home_hp_num; max_alloc_context_count = new_home_hp_alloc_context_count; } } // consider heaps both inside our local NUMA node, // and outside, but with different thresholds enum { LOCAL_NUMA_NODE, REMOTE_NUMA_NODE }; for (int pass = LOCAL_NUMA_NODE; pass <= REMOTE_NUMA_NODE; pass++) { int count = end - start; int max_tries = min(count, 4); // we will consider max_tries consecutive (in a circular sense) // other heaps from a semi random starting point // alloc_count often increases by multiples of 16 (due to logic at top of routine), // and we want to advance the starting point by 4 between successive calls, // therefore the shift right by 2 bits int heap_num = start + ((acontext->alloc_count >> 2) + new_home_hp_num) % count; #ifdef HEAP_BALANCE_INSTRUMENTATION dprintf(HEAP_BALANCE_TEMP_LOG, ("TEMP starting at h%d (home_heap_num = %d, alloc_count = %d)", heap_num, new_home_hp_num, acontext->alloc_count)); #endif //HEAP_BALANCE_INSTRUMENTATION for (int tries = max_tries; --tries >= 0; heap_num++) { // wrap around if we hit the end of our range if (heap_num >= end) heap_num -= count; // wrap around if we hit the end of the heap numbers if (heap_num >= n_heaps) heap_num -= n_heaps; assert (heap_num < n_heaps); gc_heap* hp = gc_heap::g_heaps[heap_num]; dd = hp->dynamic_data_of(0); ptrdiff_t size = dd_new_allocation(dd); #ifdef HEAP_BALANCE_INSTRUMENTATION dprintf(HEAP_BALANCE_TEMP_LOG, ("TEMP looking at h%d(%dmb)", heap_num, (size / 1024 / 1024))); #endif //HEAP_BALANCE_INSTRUMENTATION // if the size is not bigger than what we already have, // give 
up immediately, as it can't be a winner... // this is a micro-optimization to avoid fetching the // alloc_context_count and possibly dividing by it if (size <= max_size) continue; int hp_alloc_context_count = hp->alloc_context_count; if (hp_alloc_context_count > 0) { size /= (hp_alloc_context_count + 1); } if (size > max_size) { #ifdef HEAP_BALANCE_INSTRUMENTATION dprintf(HEAP_BALANCE_TEMP_LOG, ("TEMPorg h%d(%dmb), m h%d(%dmb)", org_hp_num, (max_size / 1024 / 1024), hp->heap_number, (size / 1024 / 1024))); #endif //HEAP_BALANCE_INSTRUMENTATION max_hp = hp; max_size = size; max_hp_num = max_hp->heap_number; max_alloc_context_count = hp_alloc_context_count; } } if ((max_hp == org_hp) && (end < finish)) { start = end; end = finish; delta = local_delta * 2; // Make it twice as hard to balance to remote nodes on NUMA. } else { // we already found a better heap, or there are no remote NUMA nodes break; } } } while (org_alloc_context_count != org_hp->alloc_context_count || max_alloc_context_count != max_hp->alloc_context_count); #ifdef HEAP_BALANCE_INSTRUMENTATION uint16_t ideal_proc_no_before_set_ideal = 0; GCToOSInterface::GetCurrentThreadIdealProc (&ideal_proc_no_before_set_ideal); #endif //HEAP_BALANCE_INSTRUMENTATION if (max_hp != org_hp) { final_alloc_hp_num = max_hp->heap_number; org_hp->alloc_context_count--; max_hp->alloc_context_count++; acontext->set_alloc_heap (GCHeap::GetHeap (final_alloc_hp_num)); if (!gc_thread_no_affinitize_p) { uint16_t src_proc_no = heap_select::find_proc_no_from_heap_no (org_hp->heap_number); uint16_t dst_proc_no = heap_select::find_proc_no_from_heap_no (max_hp->heap_number); dprintf (HEAP_BALANCE_TEMP_LOG, ("TEMPSW! h%d(p%d)->h%d(p%d)", org_hp_num, src_proc_no, final_alloc_hp_num, dst_proc_no)); #ifdef HEAP_BALANCE_INSTRUMENTATION int current_proc_no_before_set_ideal = GCToOSInterface::GetCurrentProcessorNumber (); if (current_proc_no_before_set_ideal != last_proc_no) { dprintf (HEAP_BALANCE_TEMP_LOG, ("TEMPSPa: %d->%d", last_proc_no, current_proc_no_before_set_ideal)); multiple_procs_p = true; } #endif //HEAP_BALANCE_INSTRUMENTATION if (!GCToOSInterface::SetCurrentThreadIdealAffinity (src_proc_no, dst_proc_no)) { dprintf (HEAP_BALANCE_TEMP_LOG, ("TEMPFailed to set the ideal processor for heap %d %d->%d", org_hp->heap_number, (int)src_proc_no, (int)dst_proc_no)); } #ifdef HEAP_BALANCE_INSTRUMENTATION else { set_ideal_p = true; } #endif //HEAP_BALANCE_INSTRUMENTATION } } #ifdef HEAP_BALANCE_INSTRUMENTATION add_to_hb_numa (proc_no, ideal_proc_no_before_set_ideal, final_alloc_hp_num, multiple_procs_p, alloc_count_p, set_ideal_p); #endif //HEAP_BALANCE_INSTRUMENTATION } } } acontext->alloc_count++; } ptrdiff_t gc_heap::get_balance_heaps_uoh_effective_budget (int generation_num) { if (heap_hard_limit) { const ptrdiff_t free_list_space = generation_free_list_space (generation_of (generation_num)); heap_segment* seg = generation_start_segment (generation_of (generation_num)); assert (heap_segment_next (seg) == nullptr); const ptrdiff_t allocated = heap_segment_allocated (seg) - seg->mem; // We could calculate the actual end_of_seg_space by taking reserved - allocated, // but all heaps have the same reserved memory and this value is only used for comparison. 
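// For example (illustrative numbers): 8MB on the free list with 5MB already
// allocated on the single segment gives an effective budget of roughly 3MB; the
// value can go negative and is only used for comparison, since balance_heaps_uoh
// simply picks the heap with the largest budget, with a small delta favoring the
// home heap.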
return free_list_space - allocated; } else { return dd_new_allocation (dynamic_data_of (generation_num)); } } gc_heap* gc_heap::balance_heaps_uoh (alloc_context* acontext, size_t alloc_size, int generation_num) { const int home_hp_num = heap_select::select_heap(acontext); dprintf (3, ("[h%d] LA: %Id", home_hp_num, alloc_size)); gc_heap* home_hp = GCHeap::GetHeap(home_hp_num)->pGenGCHeap; dynamic_data* dd = home_hp->dynamic_data_of (generation_num); const ptrdiff_t home_hp_size = home_hp->get_balance_heaps_uoh_effective_budget (generation_num); size_t delta = dd_min_size (dd) / 2; int start, end; heap_select::get_heap_range_for_heap(home_hp_num, &start, &end); const int finish = start + n_heaps; try_again: gc_heap* max_hp = home_hp; ptrdiff_t max_size = home_hp_size + delta; dprintf (3, ("home hp: %d, max size: %d", home_hp_num, max_size)); for (int i = start; i < end; i++) { gc_heap* hp = GCHeap::GetHeap(i%n_heaps)->pGenGCHeap; const ptrdiff_t size = hp->get_balance_heaps_uoh_effective_budget (generation_num); dprintf (3, ("hp: %d, size: %d", hp->heap_number, size)); if (size > max_size) { max_hp = hp; max_size = size; dprintf (3, ("max hp: %d, max size: %d", max_hp->heap_number, max_size)); } } if ((max_hp == home_hp) && (end < finish)) { start = end; end = finish; delta = dd_min_size (dd) * 3 / 2; // Make it harder to balance to remote nodes on NUMA. goto try_again; } if (max_hp != home_hp) { dprintf (3, ("uoh: %d(%Id)->%d(%Id)", home_hp->heap_number, dd_new_allocation (home_hp->dynamic_data_of (generation_num)), max_hp->heap_number, dd_new_allocation (max_hp->dynamic_data_of (generation_num)))); } return max_hp; } gc_heap* gc_heap::balance_heaps_uoh_hard_limit_retry (alloc_context* acontext, size_t alloc_size, int generation_num) { assert (heap_hard_limit); const int home_heap = heap_select::select_heap(acontext); dprintf (3, ("[h%d] balance_heaps_loh_hard_limit_retry alloc_size: %d", home_heap, alloc_size)); int start, end; heap_select::get_heap_range_for_heap (home_heap, &start, &end); const int finish = start + n_heaps; gc_heap* max_hp = nullptr; size_t max_end_of_seg_space = alloc_size; // Must be more than this much, or return NULL try_again: { for (int i = start; i < end; i++) { gc_heap* hp = GCHeap::GetHeap (i%n_heaps)->pGenGCHeap; heap_segment* seg = generation_start_segment (hp->generation_of (generation_num)); // With a hard limit, there is only one segment. assert (heap_segment_next (seg) == nullptr); const size_t end_of_seg_space = heap_segment_reserved (seg) - heap_segment_allocated (seg); if (end_of_seg_space >= max_end_of_seg_space) { dprintf (3, ("Switching heaps in hard_limit_retry! To: [h%d], New end_of_seg_space: %d", hp->heap_number, end_of_seg_space)); max_end_of_seg_space = end_of_seg_space; max_hp = hp; } } } // Only switch to a remote NUMA node if we didn't find space on this one. 
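// At this point max_hp is the heap (if any) with the most end-of-segment space,
// provided that space is at least alloc_size. If nothing in the local NUMA range
// qualified we widen the search below; if no heap qualifies at all, the NULL
// return makes allocate_more_space fail the request.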
if ((max_hp == nullptr) && (end < finish)) { start = end; end = finish; goto try_again; } return max_hp; } #endif //MULTIPLE_HEAPS BOOL gc_heap::allocate_more_space(alloc_context* acontext, size_t size, uint32_t flags, int alloc_generation_number) { allocation_state status = a_state_start; do { #ifdef MULTIPLE_HEAPS if (alloc_generation_number == 0) { balance_heaps (acontext); status = acontext->get_alloc_heap()->pGenGCHeap->try_allocate_more_space (acontext, size, flags, alloc_generation_number); } else { gc_heap* alloc_heap; if (heap_hard_limit && (status == a_state_retry_allocate)) { alloc_heap = balance_heaps_uoh_hard_limit_retry (acontext, size, alloc_generation_number); if (alloc_heap == nullptr) { return false; } } else { alloc_heap = balance_heaps_uoh (acontext, size, alloc_generation_number); } status = alloc_heap->try_allocate_more_space (acontext, size, flags, alloc_generation_number); if (status == a_state_retry_allocate) { dprintf (3, ("UOH h%d alloc retry!", alloc_heap->heap_number)); } } #else status = try_allocate_more_space (acontext, size, flags, alloc_generation_number); #endif //MULTIPLE_HEAPS } while (status == a_state_retry_allocate); return (status == a_state_can_allocate); } inline CObjectHeader* gc_heap::allocate (size_t jsize, alloc_context* acontext, uint32_t flags) { size_t size = Align (jsize); assert (size >= Align (min_obj_size)); { retry: uint8_t* result = acontext->alloc_ptr; acontext->alloc_ptr+=size; if (acontext->alloc_ptr <= acontext->alloc_limit) { CObjectHeader* obj = (CObjectHeader*)result; assert (obj != 0); return obj; } else { acontext->alloc_ptr -= size; #ifdef _MSC_VER #pragma inline_depth(0) #endif //_MSC_VER if (! allocate_more_space (acontext, size, flags, 0)) return 0; #ifdef _MSC_VER #pragma inline_depth(20) #endif //_MSC_VER goto retry; } } } void gc_heap::leave_allocation_segment (generation* gen) { adjust_limit (0, 0, gen); } void gc_heap::init_free_and_plug() { #ifdef FREE_USAGE_STATS int i = (settings.concurrent ? max_generation : 0); for (; i <= settings.condemned_generation; i++) { generation* gen = generation_of (i); #ifdef DOUBLY_LINKED_FL print_free_and_plug ("BGC"); #else memset (gen->gen_free_spaces, 0, sizeof (gen->gen_free_spaces)); #endif //DOUBLY_LINKED_FL memset (gen->gen_plugs, 0, sizeof (gen->gen_plugs)); memset (gen->gen_current_pinned_free_spaces, 0, sizeof (gen->gen_current_pinned_free_spaces)); } if (settings.condemned_generation != max_generation) { for (int i = (settings.condemned_generation + 1); i <= max_generation; i++) { generation* gen = generation_of (i); memset (gen->gen_plugs, 0, sizeof (gen->gen_plugs)); } } #endif //FREE_USAGE_STATS } void gc_heap::print_free_and_plug (const char* msg) { #ifdef FREE_USAGE_STATS int older_gen = ((settings.condemned_generation == max_generation) ? max_generation : (settings.condemned_generation + 1)); for (int i = 0; i <= older_gen; i++) { generation* gen = generation_of (i); for (int j = 0; j < NUM_GEN_POWER2; j++) { if ((gen->gen_free_spaces[j] != 0) || (gen->gen_plugs[j] != 0)) { dprintf (2, ("[%s][h%d][%s#%d]gen%d: 2^%d: F: %Id, P: %Id", msg, heap_number, (settings.concurrent ? 
"BGC" : "GC"), settings.gc_index, i, (j + 9), gen->gen_free_spaces[j], gen->gen_plugs[j])); } } } #else UNREFERENCED_PARAMETER(msg); #endif //FREE_USAGE_STATS } // replace with allocator::first_suitable_bucket int gc_heap::find_bucket (size_t size) { size_t sz = BASE_GEN_SIZE; int i = 0; for (; i < (NUM_GEN_POWER2 - 1); i++) { if (size < sz) { break; } sz = sz * 2; } return i; } void gc_heap::add_gen_plug (int gen_number, size_t plug_size) { #ifdef FREE_USAGE_STATS dprintf (3, ("adding plug size %Id to gen%d", plug_size, gen_number)); generation* gen = generation_of (gen_number); size_t sz = BASE_GEN_SIZE; int i = find_bucket (plug_size); (gen->gen_plugs[i])++; #else UNREFERENCED_PARAMETER(gen_number); UNREFERENCED_PARAMETER(plug_size); #endif //FREE_USAGE_STATS } void gc_heap::add_item_to_current_pinned_free (int gen_number, size_t free_size) { #ifdef FREE_USAGE_STATS generation* gen = generation_of (gen_number); size_t sz = BASE_GEN_SIZE; int i = find_bucket (free_size); (gen->gen_current_pinned_free_spaces[i])++; generation_pinned_free_obj_space (gen) += free_size; dprintf (3, ("left pin free %Id(2^%d) to gen%d, total %Id bytes (%Id)", free_size, (i + 10), gen_number, generation_pinned_free_obj_space (gen), gen->gen_current_pinned_free_spaces[i])); #else UNREFERENCED_PARAMETER(gen_number); UNREFERENCED_PARAMETER(free_size); #endif //FREE_USAGE_STATS } // This is only for items large enough to be on the FL // Ideally we should keep track of smaller ones too but for now // it's easier to make the accounting right void gc_heap::add_gen_free (int gen_number, size_t free_size) { #ifdef FREE_USAGE_STATS dprintf (3, ("adding free size %Id to gen%d", free_size, gen_number)); if (free_size < min_free_list) return; generation* gen = generation_of (gen_number); size_t sz = BASE_GEN_SIZE; int i = find_bucket (free_size); (gen->gen_free_spaces[i])++; if (gen_number == max_generation) { dprintf (3, ("Mb b%d: f+ %Id (%Id)", i, free_size, gen->gen_free_spaces[i])); } #else UNREFERENCED_PARAMETER(gen_number); UNREFERENCED_PARAMETER(free_size); #endif //FREE_USAGE_STATS } void gc_heap::remove_gen_free (int gen_number, size_t free_size) { #ifdef FREE_USAGE_STATS dprintf (3, ("removing free %Id from gen%d", free_size, gen_number)); if (free_size < min_free_list) return; generation* gen = generation_of (gen_number); size_t sz = BASE_GEN_SIZE; int i = find_bucket (free_size); (gen->gen_free_spaces[i])--; if (gen_number == max_generation) { dprintf (3, ("Mb b%d: f- %Id (%Id)", i, free_size, gen->gen_free_spaces[i])); } #else UNREFERENCED_PARAMETER(gen_number); UNREFERENCED_PARAMETER(free_size); #endif //FREE_USAGE_STATS } #ifdef DOUBLY_LINKED_FL // This is only called on free spaces. BOOL gc_heap::should_set_bgc_mark_bit (uint8_t* o) { if (!current_sweep_seg) { assert (current_bgc_state == bgc_not_in_process); return FALSE; } // This is cheaper so I am doing this comparision first before having to get the seg for o. if (in_range_for_segment (o, current_sweep_seg)) { // The current sweep seg could have free spaces beyond its background_allocated so we need // to check for that. 
if ((o >= current_sweep_pos) && (o < heap_segment_background_allocated (current_sweep_seg))) { #ifndef USE_REGIONS if (current_sweep_seg == saved_sweep_ephemeral_seg) { return (o < saved_sweep_ephemeral_start); } else #endif //!USE_REGIONS { return TRUE; } } else return FALSE; } else { // We can have segments outside the BGC range that were allocated during mark - and we // wouldn't have committed the mark array for them and their background_allocated would be // non-zero. Don't set mark bits for those. // The ones allocated during BGC sweep would have their background_allocated as 0. if ((o >= background_saved_lowest_address) && (o < background_saved_highest_address)) { heap_segment* seg = seg_mapping_table_segment_of (o); // if bgc_allocated is 0 it means it was allocated during bgc sweep, // and everything on it should be considered live. uint8_t* background_allocated = heap_segment_background_allocated (seg); if (background_allocated == 0) return FALSE; // During BGC sweep gen1 GCs could add some free spaces in gen2. // If we use those, we should not set the mark bits on them. // They could either be a newly allocated seg which is covered by the // above case; or they are on a seg that's seen but beyond what BGC mark // saw. else if (o >= background_allocated) return FALSE; else return (!heap_segment_swept_p (seg)); } else return FALSE; } } #endif //DOUBLY_LINKED_FL uint8_t* gc_heap::allocate_in_older_generation (generation* gen, size_t size, int from_gen_number, uint8_t* old_loc REQD_ALIGN_AND_OFFSET_DCL) { size = Align (size); assert (size >= Align (min_obj_size)); assert (from_gen_number < max_generation); assert (from_gen_number >= 0); assert (generation_of (from_gen_number + 1) == gen); #ifdef DOUBLY_LINKED_FL BOOL consider_bgc_mark_p = FALSE; BOOL check_current_sweep_p = FALSE; BOOL check_saved_sweep_p = FALSE; BOOL try_added_list_p = (gen->gen_num == max_generation); BOOL record_free_list_allocated_p = ((gen->gen_num == max_generation) && (current_c_gc_state == c_gc_state_planning)); #endif //DOUBLY_LINKED_FL allocator* gen_allocator = generation_allocator (gen); BOOL discard_p = gen_allocator->discard_if_no_fit_p (); #ifdef SHORT_PLUGS int pad_in_front = ((old_loc != 0) && ((from_gen_number+1) != max_generation)) ? USE_PADDING_FRONT : 0; #else //SHORT_PLUGS int pad_in_front = 0; #endif //SHORT_PLUGS size_t real_size = size + Align (min_obj_size); if (pad_in_front) real_size += Align (min_obj_size); #ifdef RESPECT_LARGE_ALIGNMENT real_size += switch_alignment_size (pad_in_front); #endif //RESPECT_LARGE_ALIGNMENT if (! 
(size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, generation_allocation_pointer (gen), generation_allocation_limit (gen), old_loc, USE_PADDING_TAIL | pad_in_front))) { for (unsigned int a_l_idx = gen_allocator->first_suitable_bucket(real_size * 2); a_l_idx < gen_allocator->number_of_buckets(); a_l_idx++) { uint8_t* free_list = 0; uint8_t* prev_free_item = 0; BOOL use_undo_p = !discard_p; #ifdef DOUBLY_LINKED_FL if (a_l_idx == 0) { use_undo_p = FALSE; } if (try_added_list_p) { free_list = gen_allocator->added_alloc_list_head_of (a_l_idx); while (free_list != 0) { dprintf (3, ("considering free list in added list%Ix", (size_t)free_list)); size_t free_list_size = unused_array_size (free_list); if (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, free_list, (free_list + free_list_size), old_loc, USE_PADDING_TAIL | pad_in_front)) { dprintf (4, ("F:%Ix-%Id", (size_t)free_list, free_list_size)); gen_allocator->unlink_item_no_undo_added (a_l_idx, free_list, prev_free_item); generation_free_list_space (gen) -= free_list_size; assert ((ptrdiff_t)generation_free_list_space (gen) >= 0); remove_gen_free (gen->gen_num, free_list_size); if (record_free_list_allocated_p) { generation_set_bgc_mark_bit_p (gen) = should_set_bgc_mark_bit (free_list); dprintf (3333, ("SFA: %Ix->%Ix(%d)", free_list, (free_list + free_list_size), (generation_set_bgc_mark_bit_p (gen) ? 1 : 0))); } adjust_limit (free_list, free_list_size, gen); generation_allocate_end_seg_p (gen) = FALSE; goto finished; } // We do first fit on bucket 0 because we are not guaranteed to find a fit there. else if (a_l_idx == 0) { dprintf (3, ("couldn't use this free area, discarding")); generation_free_obj_space (gen) += free_list_size; gen_allocator->unlink_item_no_undo_added (a_l_idx, free_list, prev_free_item); generation_free_list_space (gen) -= free_list_size; assert ((ptrdiff_t)generation_free_list_space (gen) >= 0); remove_gen_free (gen->gen_num, free_list_size); } else { prev_free_item = free_list; } free_list = free_list_slot (free_list); } } #endif //DOUBLY_LINKED_FL free_list = gen_allocator->alloc_list_head_of (a_l_idx); prev_free_item = 0; while (free_list != 0) { dprintf (3, ("considering free list %Ix", (size_t)free_list)); size_t free_list_size = unused_array_size (free_list); if (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, free_list, (free_list + free_list_size), old_loc, USE_PADDING_TAIL | pad_in_front)) { dprintf (4, ("F:%Ix-%Id", (size_t)free_list, free_list_size)); gen_allocator->unlink_item (a_l_idx, free_list, prev_free_item, use_undo_p); generation_free_list_space (gen) -= free_list_size; assert ((ptrdiff_t)generation_free_list_space (gen) >= 0); remove_gen_free (gen->gen_num, free_list_size); #ifdef DOUBLY_LINKED_FL if (!discard_p && !use_undo_p) { gen2_removed_no_undo += free_list_size; dprintf (3, ("h%d: remove with no undo %Id = %Id", heap_number, free_list_size, gen2_removed_no_undo)); } if (record_free_list_allocated_p) { generation_set_bgc_mark_bit_p (gen) = should_set_bgc_mark_bit (free_list); dprintf (3333, ("SF: %Ix(%d)", free_list, (generation_set_bgc_mark_bit_p (gen) ? 1 : 0))); } #endif //DOUBLY_LINKED_FL adjust_limit (free_list, free_list_size, gen); generation_allocate_end_seg_p (gen) = FALSE; goto finished; } // We do first fit on bucket 0 because we are not guaranteed to find a fit there. 
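// A free item that doesn't fit is either left linked (prev_free_item just walks
// past it) or, in the discard_p / bucket 0 case below, unlinked with its bytes
// moved from free list space to free obj space so the accounting still matches
// what the free list can actually satisfy.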
else if (discard_p || (a_l_idx == 0)) { dprintf (3, ("couldn't use this free area, discarding")); generation_free_obj_space (gen) += free_list_size; gen_allocator->unlink_item (a_l_idx, free_list, prev_free_item, FALSE); generation_free_list_space (gen) -= free_list_size; assert ((ptrdiff_t)generation_free_list_space (gen) >= 0); remove_gen_free (gen->gen_num, free_list_size); #ifdef DOUBLY_LINKED_FL if (!discard_p) { gen2_removed_no_undo += free_list_size; dprintf (3, ("h%d: b0 remove with no undo %Id = %Id", heap_number, free_list_size, gen2_removed_no_undo)); } #endif //DOUBLY_LINKED_FL } else { prev_free_item = free_list; } free_list = free_list_slot (free_list); } } #ifdef USE_REGIONS // We don't want to always go back to the first region since there might be many. heap_segment* seg = generation_allocation_segment (gen); dprintf (3, ("end of seg, starting from alloc seg %Ix", heap_segment_mem (seg))); assert (seg != ephemeral_heap_segment); while (true) #else //go back to the beginning of the segment list heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); if (seg != generation_allocation_segment (gen)) { leave_allocation_segment (gen); generation_allocation_segment (gen) = seg; } while (seg != ephemeral_heap_segment) #endif //USE_REGIONS { if (size_fit_p(size REQD_ALIGN_AND_OFFSET_ARG, heap_segment_plan_allocated (seg), heap_segment_committed (seg), old_loc, USE_PADDING_TAIL | pad_in_front)) { adjust_limit (heap_segment_plan_allocated (seg), (heap_segment_committed (seg) - heap_segment_plan_allocated (seg)), gen); generation_allocate_end_seg_p (gen) = TRUE; heap_segment_plan_allocated (seg) = heap_segment_committed (seg); dprintf (3, ("seg %Ix is used for end of seg alloc", heap_segment_mem (seg))); goto finished; } else { if (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, heap_segment_plan_allocated (seg), heap_segment_reserved (seg), old_loc, USE_PADDING_TAIL | pad_in_front) && grow_heap_segment (seg, heap_segment_plan_allocated (seg), old_loc, size, pad_in_front REQD_ALIGN_AND_OFFSET_ARG)) { adjust_limit (heap_segment_plan_allocated (seg), (heap_segment_committed (seg) - heap_segment_plan_allocated (seg)), gen); generation_allocate_end_seg_p (gen) = TRUE; heap_segment_plan_allocated (seg) = heap_segment_committed (seg); dprintf (3, ("seg %Ix is used for end of seg alloc after grow, %Ix", heap_segment_mem (seg), heap_segment_committed (seg))); goto finished; } else { leave_allocation_segment (gen); heap_segment* next_seg = heap_segment_next_rw (seg); #ifdef USE_REGIONS assert (next_seg != ephemeral_heap_segment); #endif //USE_REGIONS if (next_seg) { generation_allocation_segment (gen) = next_seg; generation_allocation_pointer (gen) = heap_segment_mem (next_seg); generation_allocation_limit (gen) = generation_allocation_pointer (gen); dprintf (3, ("alloc region advanced to %Ix", heap_segment_mem (next_seg))); } else { size = 0; goto finished; } } } seg = generation_allocation_segment (gen); } //No need to fix the last region. 
Will be done later size = 0; goto finished; } finished: if (0 == size) { return 0; } else { uint8_t* result = generation_allocation_pointer (gen); size_t pad = 0; #ifdef SHORT_PLUGS if ((pad_in_front & USE_PADDING_FRONT) && (((generation_allocation_pointer (gen) - generation_allocation_context_start_region (gen))==0) || ((generation_allocation_pointer (gen) - generation_allocation_context_start_region (gen))>=DESIRED_PLUG_LENGTH))) { pad = Align (min_obj_size); set_plug_padded (old_loc); } #endif //SHORT_PLUGS #ifdef FEATURE_STRUCTALIGN _ASSERTE(!old_loc || alignmentOffset != 0); _ASSERTE(old_loc || requiredAlignment == DATA_ALIGNMENT); if (old_loc != 0) { size_t pad1 = ComputeStructAlignPad(result+pad, requiredAlignment, alignmentOffset); set_node_aligninfo (old_loc, requiredAlignment, pad1); pad += pad1; } #else // FEATURE_STRUCTALIGN if (!((old_loc == 0) || same_large_alignment_p (old_loc, result+pad))) { pad += switch_alignment_size (pad != 0); set_node_realigned (old_loc); dprintf (3, ("Allocation realignment old_loc: %Ix, new_loc:%Ix", (size_t)old_loc, (size_t)(result+pad))); assert (same_large_alignment_p (result + pad, old_loc)); } #endif // FEATURE_STRUCTALIGN dprintf (3, ("Allocate %Id bytes", size)); if ((old_loc == 0) || (pad != 0)) { //allocating a non plug or a gap, so reset the start region generation_allocation_context_start_region (gen) = generation_allocation_pointer (gen); } generation_allocation_pointer (gen) += size + pad; assert (generation_allocation_pointer (gen) <= generation_allocation_limit (gen)); generation_free_obj_space (gen) += pad; if (generation_allocate_end_seg_p (gen)) { generation_end_seg_allocated (gen) += size; } else { #ifdef DOUBLY_LINKED_FL if (generation_set_bgc_mark_bit_p (gen)) { dprintf (2, ("IOM: %Ix(->%Ix(%Id) (%Ix-%Ix)", old_loc, result, pad, (size_t)(&mark_array [mark_word_of (result)]), (size_t)(mark_array [mark_word_of (result)]))); set_plug_bgc_mark_bit (old_loc); } generation_last_free_list_allocated (gen) = old_loc; #endif //DOUBLY_LINKED_FL generation_free_list_allocated (gen) += size; } generation_allocation_size (gen) += size; dprintf (3, ("aio: ptr: %Ix, limit: %Ix, sr: %Ix", generation_allocation_pointer (gen), generation_allocation_limit (gen), generation_allocation_context_start_region (gen))); return (result + pad); } } #ifndef USE_REGIONS void gc_heap::repair_allocation_in_expanded_heap (generation* consing_gen) { //make sure that every generation has a planned allocation start int gen_number = max_generation - 1; while (gen_number>= 0) { generation* gen = generation_of (gen_number); if (0 == generation_plan_allocation_start (gen)) { realloc_plan_generation_start (gen, consing_gen); assert (generation_plan_allocation_start (gen)); } gen_number--; } // now we know the planned allocation size size_t size = (generation_allocation_limit (consing_gen) - generation_allocation_pointer (consing_gen)); heap_segment* seg = generation_allocation_segment (consing_gen); if (generation_allocation_limit (consing_gen) == heap_segment_plan_allocated (seg)) { if (size != 0) { heap_segment_plan_allocated (seg) = generation_allocation_pointer (consing_gen); } } else { assert (settings.condemned_generation == max_generation); uint8_t* first_address = generation_allocation_limit (consing_gen); //look through the pinned plugs for relevant ones. //Look for the right pinned plug to start from. 
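// The mark stack holds the pinned plugs queued during planning; find the one
// whose plug starts exactly at the consing gen's allocation limit so the unused
// tail of that allocation context can be recorded as the plug's pinned_len
// (roughly, the free gap in front of the pin) instead of being lost.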
size_t mi = 0; mark* m = 0; while (mi != mark_stack_tos) { m = pinned_plug_of (mi); if ((pinned_plug (m) == first_address)) break; else mi++; } assert (mi != mark_stack_tos); pinned_len (m) = size; } } //tododefrag optimize for new segment (plan_allocated == mem) uint8_t* gc_heap::allocate_in_expanded_heap (generation* gen, size_t size, BOOL& adjacentp, uint8_t* old_loc, #ifdef SHORT_PLUGS BOOL set_padding_on_saved_p, mark* pinned_plug_entry, #endif //SHORT_PLUGS BOOL consider_bestfit, int active_new_gen_number REQD_ALIGN_AND_OFFSET_DCL) { dprintf (3, ("aie: P: %Ix, size: %Ix", old_loc, size)); size = Align (size); assert (size >= Align (min_obj_size)); #ifdef SHORT_PLUGS int pad_in_front = ((old_loc != 0) && (active_new_gen_number != max_generation)) ? USE_PADDING_FRONT : 0; #else //SHORT_PLUGS int pad_in_front = 0; #endif //SHORT_PLUGS if (consider_bestfit && use_bestfit) { assert (bestfit_seg); dprintf (SEG_REUSE_LOG_1, ("reallocating 0x%Ix in expanded heap, size: %Id", old_loc, size)); return bestfit_seg->fit (old_loc, size REQD_ALIGN_AND_OFFSET_ARG); } heap_segment* seg = generation_allocation_segment (gen); if (! (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, generation_allocation_pointer (gen), generation_allocation_limit (gen), old_loc, ((generation_allocation_limit (gen) != heap_segment_plan_allocated (seg))? USE_PADDING_TAIL : 0) | pad_in_front))) { dprintf (3, ("aie: can't fit: ptr: %Ix, limit: %Ix", generation_allocation_pointer (gen), generation_allocation_limit (gen))); adjacentp = FALSE; uint8_t* first_address = (generation_allocation_limit (gen) ? generation_allocation_limit (gen) : heap_segment_mem (seg)); assert (in_range_for_segment (first_address, seg)); uint8_t* end_address = heap_segment_reserved (seg); dprintf (3, ("aie: first_addr: %Ix, gen alloc limit: %Ix, end_address: %Ix", first_address, generation_allocation_limit (gen), end_address)); size_t mi = 0; mark* m = 0; if (heap_segment_allocated (seg) != heap_segment_mem (seg)) { assert (settings.condemned_generation == max_generation); //look through the pinned plugs for relevant ones. //Look for the right pinned plug to start from. while (mi != mark_stack_tos) { m = pinned_plug_of (mi); if ((pinned_plug (m) >= first_address) && (pinned_plug (m) < end_address)) { dprintf (3, ("aie: found pin: %Ix", pinned_plug (m))); break; } else mi++; } if (mi != mark_stack_tos) { //fix old free list. 
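// "fix old free list" here means: walk back to the pin whose plug sits exactly
// at the current allocation limit and set its pinned_len to the part of the
// hole that is still unused (limit - pointer), since the rest of that gap has
// already been consumed by earlier allocations.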
size_t hsize = (generation_allocation_limit (gen) - generation_allocation_pointer (gen)); { dprintf(3,("gc filling up hole")); ptrdiff_t mi1 = (ptrdiff_t)mi; while ((mi1 >= 0) && (pinned_plug (pinned_plug_of(mi1)) != generation_allocation_limit (gen))) { dprintf (3, ("aie: checking pin %Ix", pinned_plug (pinned_plug_of(mi1)))); mi1--; } if (mi1 >= 0) { size_t saved_pinned_len = pinned_len (pinned_plug_of(mi1)); pinned_len (pinned_plug_of(mi1)) = hsize; dprintf (3, ("changing %Ix len %Ix->%Ix", pinned_plug (pinned_plug_of(mi1)), saved_pinned_len, pinned_len (pinned_plug_of(mi1)))); } } } } else { assert (generation_allocation_limit (gen) == generation_allocation_pointer (gen)); mi = mark_stack_tos; } while ((mi != mark_stack_tos) && in_range_for_segment (pinned_plug (m), seg)) { size_t len = pinned_len (m); uint8_t* free_list = (pinned_plug (m) - len); dprintf (3, ("aie: testing free item: %Ix->%Ix(%Ix)", free_list, (free_list + len), len)); if (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, free_list, (free_list + len), old_loc, USE_PADDING_TAIL | pad_in_front)) { dprintf (3, ("aie: Found adequate unused area: %Ix, size: %Id", (size_t)free_list, len)); { generation_allocation_pointer (gen) = free_list; generation_allocation_context_start_region (gen) = generation_allocation_pointer (gen); generation_allocation_limit (gen) = (free_list + len); } goto allocate_in_free; } mi++; m = pinned_plug_of (mi); } //switch to the end of the segment. generation_allocation_pointer (gen) = heap_segment_plan_allocated (seg); generation_allocation_context_start_region (gen) = generation_allocation_pointer (gen); heap_segment_plan_allocated (seg) = heap_segment_committed (seg); generation_allocation_limit (gen) = heap_segment_plan_allocated (seg); dprintf (3, ("aie: switching to end of seg: %Ix->%Ix(%Ix)", generation_allocation_pointer (gen), generation_allocation_limit (gen), (generation_allocation_limit (gen) - generation_allocation_pointer (gen)))); if (!size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, generation_allocation_pointer (gen), generation_allocation_limit (gen), old_loc, USE_PADDING_TAIL | pad_in_front)) { dprintf (3, ("aie: ptr: %Ix, limit: %Ix, can't alloc", generation_allocation_pointer (gen), generation_allocation_limit (gen))); assert (!"Can't allocate if no free space"); return 0; } } else { adjacentp = TRUE; } allocate_in_free: { uint8_t* result = generation_allocation_pointer (gen); size_t pad = 0; #ifdef SHORT_PLUGS if ((pad_in_front & USE_PADDING_FRONT) && (((generation_allocation_pointer (gen) - generation_allocation_context_start_region (gen))==0) || ((generation_allocation_pointer (gen) - generation_allocation_context_start_region (gen))>=DESIRED_PLUG_LENGTH))) { pad = Align (min_obj_size); set_padding_in_expand (old_loc, set_padding_on_saved_p, pinned_plug_entry); } #endif //SHORT_PLUGS #ifdef FEATURE_STRUCTALIGN _ASSERTE(!old_loc || alignmentOffset != 0); _ASSERTE(old_loc || requiredAlignment == DATA_ALIGNMENT); if (old_loc != 0) { size_t pad1 = ComputeStructAlignPad(result+pad, requiredAlignment, alignmentOffset); set_node_aligninfo (old_loc, requiredAlignment, pad1); pad += pad1; adjacentp = FALSE; } #else // FEATURE_STRUCTALIGN if (!((old_loc == 0) || same_large_alignment_p (old_loc, result+pad))) { pad += switch_alignment_size (pad != 0); set_node_realigned (old_loc); dprintf (3, ("Allocation realignment old_loc: %Ix, new_loc:%Ix", (size_t)old_loc, (size_t)(result+pad))); assert (same_large_alignment_p (result + pad, old_loc)); adjacentp = FALSE; } #endif // FEATURE_STRUCTALIGN if 
((old_loc == 0) || (pad != 0)) { //allocating a non plug or a gap, so reset the start region generation_allocation_context_start_region (gen) = generation_allocation_pointer (gen); } generation_allocation_pointer (gen) += size + pad; assert (generation_allocation_pointer (gen) <= generation_allocation_limit (gen)); dprintf (3, ("Allocated in expanded heap %Ix:%Id", (size_t)(result+pad), size)); dprintf (3, ("aie: ptr: %Ix, limit: %Ix, sr: %Ix", generation_allocation_pointer (gen), generation_allocation_limit (gen), generation_allocation_context_start_region (gen))); return result + pad; } } generation* gc_heap::ensure_ephemeral_heap_segment (generation* consing_gen) { heap_segment* seg = generation_allocation_segment (consing_gen); if (seg != ephemeral_heap_segment) { assert (generation_allocation_pointer (consing_gen)>= heap_segment_mem (seg)); assert (generation_allocation_pointer (consing_gen)<= heap_segment_committed (seg)); //fix the allocated size of the segment. heap_segment_plan_allocated (seg) = generation_allocation_pointer (consing_gen); generation* new_consing_gen = generation_of (max_generation - 1); generation_allocation_pointer (new_consing_gen) = heap_segment_mem (ephemeral_heap_segment); generation_allocation_limit (new_consing_gen) = generation_allocation_pointer (new_consing_gen); generation_allocation_context_start_region (new_consing_gen) = generation_allocation_pointer (new_consing_gen); generation_allocation_segment (new_consing_gen) = ephemeral_heap_segment; return new_consing_gen; } else return consing_gen; } #endif //!USE_REGIONS inline void gc_heap::init_alloc_info (generation* gen, heap_segment* seg) { generation_allocation_segment (gen) = seg; generation_allocation_pointer (gen) = heap_segment_mem (seg); generation_allocation_limit (gen) = generation_allocation_pointer (gen); generation_allocation_context_start_region (gen) = generation_allocation_pointer (gen); } inline heap_segment* gc_heap::get_next_alloc_seg (generation* gen) { #ifdef USE_REGIONS heap_segment* saved_region = generation_allocation_segment (gen); int gen_num = heap_segment_gen_num (saved_region); heap_segment* region = saved_region; while (1) { region = heap_segment_non_sip (region); if (region) { break; } else { if (gen_num > 0) { gen_num--; region = generation_start_segment (generation_of (gen_num)); dprintf (REGIONS_LOG, ("h%d next alloc region: switching to next gen%d start %Ix(%Ix)", heap_number, heap_segment_gen_num (region), (size_t)region, heap_segment_mem (region))); } else { assert (!"ran out regions when getting the next alloc seg!"); } } } if (region != saved_region) { dprintf (REGIONS_LOG, ("init allocate region for gen%d to %Ix(%d)", gen->gen_num, heap_segment_mem (region), heap_segment_gen_num (region))); init_alloc_info (gen, region); } return region; #else return generation_allocation_segment (gen); #endif //USE_REGIONS } uint8_t* gc_heap::allocate_in_condemned_generations (generation* gen, size_t size, int from_gen_number, #ifdef SHORT_PLUGS BOOL* convert_to_pinned_p, uint8_t* next_pinned_plug, heap_segment* current_seg, #endif //SHORT_PLUGS uint8_t* old_loc REQD_ALIGN_AND_OFFSET_DCL) { #ifndef USE_REGIONS // Make sure that the youngest generation gap hasn't been allocated if (settings.promotion) { assert (generation_plan_allocation_start (youngest_generation) == 0); } #endif //!USE_REGIONS size = Align (size); assert (size >= Align (min_obj_size)); int to_gen_number = from_gen_number; if (from_gen_number != (int)max_generation) { to_gen_number = from_gen_number + 
(settings.promotion ? 1 : 0); } dprintf (3, ("aic gen%d: s: %Id, ac: %Ix-%Ix", gen->gen_num, size, generation_allocation_pointer (gen), generation_allocation_limit (gen))); #ifdef SHORT_PLUGS int pad_in_front = ((old_loc != 0) && (to_gen_number != max_generation)) ? USE_PADDING_FRONT : 0; #else //SHORT_PLUGS int pad_in_front = 0; #endif //SHORT_PLUGS if ((from_gen_number != -1) && (from_gen_number != (int)max_generation) && settings.promotion) { generation_condemned_allocated (generation_of (from_gen_number + (settings.promotion ? 1 : 0))) += size; generation_allocation_size (generation_of (from_gen_number + (settings.promotion ? 1 : 0))) += size; } retry: { heap_segment* seg = get_next_alloc_seg (gen); if (! (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, generation_allocation_pointer (gen), generation_allocation_limit (gen), old_loc, ((generation_allocation_limit (gen) != heap_segment_plan_allocated (seg))?USE_PADDING_TAIL:0)|pad_in_front))) { if ((! (pinned_plug_que_empty_p()) && (generation_allocation_limit (gen) == pinned_plug (oldest_pin())))) { size_t entry = deque_pinned_plug(); mark* pinned_plug_entry = pinned_plug_of (entry); size_t len = pinned_len (pinned_plug_entry); uint8_t* plug = pinned_plug (pinned_plug_entry); set_new_pin_info (pinned_plug_entry, generation_allocation_pointer (gen)); #ifdef USE_REGIONS if (to_gen_number == 0) { update_planned_gen0_free_space (pinned_len (pinned_plug_entry), plug); dprintf (REGIONS_LOG, ("aic: not promotion, gen0 added free space %Id at %Ix", pinned_len (pinned_plug_entry), plug)); } #endif //USE_REGIONS #ifdef FREE_USAGE_STATS generation_allocated_in_pinned_free (gen) += generation_allocated_since_last_pin (gen); dprintf (3, ("allocated %Id so far within pin %Ix, total->%Id", generation_allocated_since_last_pin (gen), plug, generation_allocated_in_pinned_free (gen))); generation_allocated_since_last_pin (gen) = 0; add_item_to_current_pinned_free (gen->gen_num, pinned_len (pinned_plug_of (entry))); #endif //FREE_USAGE_STATS dprintf (3, ("mark stack bos: %Id, tos: %Id, aic: p %Ix len: %Ix->%Ix", mark_stack_bos, mark_stack_tos, plug, len, pinned_len (pinned_plug_of (entry)))); assert(mark_stack_array[entry].len == 0 || mark_stack_array[entry].len >= Align(min_obj_size)); generation_allocation_pointer (gen) = plug + len; generation_allocation_context_start_region (gen) = generation_allocation_pointer (gen); generation_allocation_limit (gen) = heap_segment_plan_allocated (seg); set_allocator_next_pin (gen); //Add the size of the pinned plug to the right pinned allocations //find out which gen this pinned plug came from int frgn = object_gennum (plug); if ((frgn != (int)max_generation) && settings.promotion) { generation_pinned_allocation_sweep_size (generation_of (frgn + 1)) += len; #ifdef USE_REGIONS // With regions it's a bit more complicated since we only set the plan_gen_num // of a region after we've planned it. This means if the pinning plug is in the // the same seg we are planning, we haven't set its plan_gen_num yet. So we // need to check for that first. int togn = (in_range_for_segment (plug, seg) ? 
to_gen_number : object_gennum_plan (plug)); #else int togn = object_gennum_plan (plug); #endif //USE_REGIONS if (frgn < togn) { generation_pinned_allocation_compact_size (generation_of (togn)) += len; } } goto retry; } if (generation_allocation_limit (gen) != heap_segment_plan_allocated (seg)) { generation_allocation_limit (gen) = heap_segment_plan_allocated (seg); dprintf (3, ("changed limit to plan alloc: %Ix", generation_allocation_limit (gen))); } else { if (heap_segment_plan_allocated (seg) != heap_segment_committed (seg)) { heap_segment_plan_allocated (seg) = heap_segment_committed (seg); generation_allocation_limit (gen) = heap_segment_plan_allocated (seg); dprintf (3, ("changed limit to commit: %Ix", generation_allocation_limit (gen))); } else { #if !defined(RESPECT_LARGE_ALIGNMENT) && !defined(USE_REGIONS) assert (gen != youngest_generation); #endif //!RESPECT_LARGE_ALIGNMENT && !USE_REGIONS if (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, generation_allocation_pointer (gen), heap_segment_reserved (seg), old_loc, USE_PADDING_TAIL | pad_in_front) && (grow_heap_segment (seg, generation_allocation_pointer (gen), old_loc, size, pad_in_front REQD_ALIGN_AND_OFFSET_ARG))) { dprintf (3, ("Expanded segment allocation by committing more memory")); heap_segment_plan_allocated (seg) = heap_segment_committed (seg); generation_allocation_limit (gen) = heap_segment_plan_allocated (seg); } else { heap_segment* next_seg = heap_segment_next (seg); dprintf (REGIONS_LOG, ("aic next: %Ix(%Ix,%Ix) -> %Ix(%Ix,%Ix)", heap_segment_mem (seg), heap_segment_allocated (seg), heap_segment_plan_allocated (seg), (next_seg ? heap_segment_mem (next_seg) : 0), (next_seg ? heap_segment_allocated (next_seg) : 0), (next_seg ? heap_segment_plan_allocated (next_seg) : 0))); assert (generation_allocation_pointer (gen)>= heap_segment_mem (seg)); // Verify that all pinned plugs for this segment are consumed if (!pinned_plug_que_empty_p() && ((pinned_plug (oldest_pin()) < heap_segment_allocated (seg)) && (pinned_plug (oldest_pin()) >= generation_allocation_pointer (gen)))) { LOG((LF_GC, LL_INFO10, "remaining pinned plug %Ix while leaving segment on allocation", pinned_plug (oldest_pin()))); FATAL_GC_ERROR(); } assert (generation_allocation_pointer (gen)>= heap_segment_mem (seg)); assert (generation_allocation_pointer (gen)<= heap_segment_committed (seg)); heap_segment_plan_allocated (seg) = generation_allocation_pointer (gen); #ifdef USE_REGIONS set_region_plan_gen_num (seg, to_gen_number); if ((next_seg == 0) && (heap_segment_gen_num (seg) > 0)) { // We need to switch to a younger gen's segments so the allocate seg will be in // sync with the pins. 
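// (regions) Presumably the pins were queued following the same older-to-younger
// region walk, so when this generation's regions run out the allocation cursor
// continues at the start region of the next younger generation to stay in step
// with the pin queue.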
next_seg = generation_start_segment (generation_of (heap_segment_gen_num (seg) - 1)); dprintf (REGIONS_LOG, ("h%d aic: switching to next gen%d start %Ix(%Ix)", heap_number, heap_segment_gen_num (next_seg), (size_t)next_seg, heap_segment_mem (next_seg))); } #endif //USE_REGIONS if (next_seg) { init_alloc_info (gen, next_seg); } else { #ifdef USE_REGIONS assert (!"should not happen for regions!"); #else return 0; //should only happen during allocation of generation 0 gap // in that case we are going to grow the heap anyway #endif //USE_REGIONS } } } } set_allocator_next_pin (gen); goto retry; } } { assert (generation_allocation_pointer (gen)>= heap_segment_mem (generation_allocation_segment (gen))); uint8_t* result = generation_allocation_pointer (gen); size_t pad = 0; #ifdef SHORT_PLUGS if ((pad_in_front & USE_PADDING_FRONT) && (((generation_allocation_pointer (gen) - generation_allocation_context_start_region (gen))==0) || ((generation_allocation_pointer (gen) - generation_allocation_context_start_region (gen))>=DESIRED_PLUG_LENGTH))) { ptrdiff_t dist = old_loc - result; if (dist == 0) { dprintf (3, ("old alloc: %Ix, same as new alloc, not padding", old_loc)); pad = 0; } else { if ((dist > 0) && (dist < (ptrdiff_t)Align (min_obj_size))) { dprintf (1, ("old alloc: %Ix, only %d bytes > new alloc! Shouldn't happen", old_loc, dist)); FATAL_GC_ERROR(); } pad = Align (min_obj_size); set_plug_padded (old_loc); } } #endif //SHORT_PLUGS #ifdef FEATURE_STRUCTALIGN _ASSERTE(!old_loc || alignmentOffset != 0); _ASSERTE(old_loc || requiredAlignment == DATA_ALIGNMENT); if ((old_loc != 0)) { size_t pad1 = ComputeStructAlignPad(result+pad, requiredAlignment, alignmentOffset); set_node_aligninfo (old_loc, requiredAlignment, pad1); pad += pad1; } #else // FEATURE_STRUCTALIGN if (!((old_loc == 0) || same_large_alignment_p (old_loc, result+pad))) { pad += switch_alignment_size (pad != 0); set_node_realigned(old_loc); dprintf (3, ("Allocation realignment old_loc: %Ix, new_loc:%Ix", (size_t)old_loc, (size_t)(result+pad))); assert (same_large_alignment_p (result + pad, old_loc)); } #endif // FEATURE_STRUCTALIGN #ifdef SHORT_PLUGS if ((next_pinned_plug != 0) && (pad != 0) && (generation_allocation_segment (gen) == current_seg)) { assert (old_loc != 0); ptrdiff_t dist_to_next_pin = (ptrdiff_t)(next_pinned_plug - (generation_allocation_pointer (gen) + size + pad)); assert (dist_to_next_pin >= 0); if ((dist_to_next_pin >= 0) && (dist_to_next_pin < (ptrdiff_t)Align (min_obj_size))) { dprintf (3, ("%Ix->(%Ix,%Ix),%Ix(%Ix)(%Ix),NP->PP", old_loc, generation_allocation_pointer (gen), generation_allocation_limit (gen), next_pinned_plug, size, dist_to_next_pin)); clear_plug_padded (old_loc); pad = 0; *convert_to_pinned_p = TRUE; record_interesting_data_point (idp_converted_pin); return 0; } } #endif //SHORT_PLUGS if ((old_loc == 0) || (pad != 0)) { //allocating a non plug or a gap, so reset the start region generation_allocation_context_start_region (gen) = generation_allocation_pointer (gen); } generation_allocation_pointer (gen) += size + pad; assert (generation_allocation_pointer (gen) <= generation_allocation_limit (gen)); if ((pad > 0) && (to_gen_number >= 0)) { generation_free_obj_space (generation_of (to_gen_number)) += pad; } #ifdef FREE_USAGE_STATS generation_allocated_since_last_pin (gen) += size; #endif //FREE_USAGE_STATS dprintf (3, ("aic: old: %Ix ptr: %Ix, limit: %Ix, sr: %Ix, res: %Ix, pad: %Id", old_loc, generation_allocation_pointer (gen), generation_allocation_limit (gen), 
generation_allocation_context_start_region (gen), result, (size_t)pad)); assert (result + pad); return result + pad; } } int gc_heap::joined_generation_to_condemn (BOOL should_evaluate_elevation, int initial_gen, int current_gen, BOOL* blocking_collection_p STRESS_HEAP_ARG(int n_original)) { gc_data_global.gen_to_condemn_reasons.init(); #ifdef BGC_SERVO_TUNING if (settings.entry_memory_load == 0) { uint32_t current_memory_load = 0; uint64_t current_available_physical = 0; get_memory_info (&current_memory_load, &current_available_physical); settings.entry_memory_load = current_memory_load; settings.entry_available_physical_mem = current_available_physical; } #endif //BGC_SERVO_TUNING int n = current_gen; #ifdef MULTIPLE_HEAPS BOOL joined_last_gc_before_oom = FALSE; for (int i = 0; i < n_heaps; i++) { if (g_heaps[i]->last_gc_before_oom) { dprintf (GTC_LOG, ("h%d is setting blocking to TRUE", i)); joined_last_gc_before_oom = TRUE; break; } } #else BOOL joined_last_gc_before_oom = last_gc_before_oom; #endif //MULTIPLE_HEAPS if (joined_last_gc_before_oom && settings.pause_mode != pause_low_latency) { assert (*blocking_collection_p); } if (should_evaluate_elevation && (n == max_generation)) { dprintf (GTC_LOG, ("lock: %d(%d)", (settings.should_lock_elevation ? 1 : 0), settings.elevation_locked_count)); if (settings.should_lock_elevation) { settings.elevation_locked_count++; if (settings.elevation_locked_count == 6) { settings.elevation_locked_count = 0; } else { n = max_generation - 1; gc_data_global.gen_to_condemn_reasons.set_condition(gen_joined_avoid_unproductive); settings.elevation_reduced = TRUE; } } else { settings.elevation_locked_count = 0; } } else { settings.should_lock_elevation = FALSE; settings.elevation_locked_count = 0; } if (provisional_mode_triggered && (n == max_generation)) { // There are a few cases where we should not reduce the generation. if ((initial_gen == max_generation) || (settings.reason == reason_alloc_loh)) { // If we are doing a full GC in the provisional mode, we always // make it blocking because we don't want to get into a situation // where foreground GCs are asking for a compacting full GC right away // and not getting it. dprintf (GTC_LOG, ("full GC induced, not reducing gen")); if (initial_gen == max_generation) { gc_data_global.gen_to_condemn_reasons.set_condition(gen_joined_pm_induced_fullgc_p); } else { gc_data_global.gen_to_condemn_reasons.set_condition(gen_joined_pm_alloc_loh); } *blocking_collection_p = TRUE; } else if (should_expand_in_full_gc || joined_last_gc_before_oom) { dprintf (GTC_LOG, ("need full blocking GCs to expand heap or avoid OOM, not reducing gen")); assert (*blocking_collection_p); } else { dprintf (GTC_LOG, ("reducing gen in PM: %d->%d->%d", initial_gen, n, (max_generation - 1))); gc_data_global.gen_to_condemn_reasons.set_condition(gen_joined_gen1_in_pm); n = max_generation - 1; } } if (should_expand_in_full_gc) { should_expand_in_full_gc = FALSE; } if (heap_hard_limit) { // If we have already consumed 90% of the limit, we should check to see if we should compact LOH. // TODO: should unify this with gen2. 
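// Roughly: force a full compacting GC (with LOH compaction) either because this
// is the last GC before we'd report OOM, or because committed is at or above 90%
// of the hard limit and either LOH fragmentation or the estimated LOH reclaim is
// at least 1/8 of that limit.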
dprintf (GTC_LOG, ("committed %Id is %d%% of limit %Id", current_total_committed, (int)((float)current_total_committed * 100.0 / (float)heap_hard_limit), heap_hard_limit)); bool full_compact_gc_p = false; if (joined_last_gc_before_oom) { gc_data_global.gen_to_condemn_reasons.set_condition(gen_joined_limit_before_oom); full_compact_gc_p = true; } else if ((current_total_committed * 10) >= (heap_hard_limit * 9)) { size_t loh_frag = get_total_gen_fragmentation (loh_generation); // If the LOH frag is >= 1/8 it's worth compacting it if ((loh_frag * 8) >= heap_hard_limit) { dprintf (GTC_LOG, ("loh frag: %Id > 1/8 of limit %Id", loh_frag, (heap_hard_limit / 8))); gc_data_global.gen_to_condemn_reasons.set_condition(gen_joined_limit_loh_frag); full_compact_gc_p = true; } else { // If there's not much fragmentation but it looks like it'll be productive to // collect LOH, do that. size_t est_loh_reclaim = get_total_gen_estimated_reclaim (loh_generation); if ((est_loh_reclaim * 8) >= heap_hard_limit) { gc_data_global.gen_to_condemn_reasons.set_condition(gen_joined_limit_loh_reclaim); full_compact_gc_p = true; } dprintf (GTC_LOG, ("loh est reclaim: %Id, 1/8 of limit %Id", est_loh_reclaim, (heap_hard_limit / 8))); } } if (full_compact_gc_p) { n = max_generation; *blocking_collection_p = TRUE; settings.loh_compaction = TRUE; dprintf (GTC_LOG, ("compacting LOH due to hard limit")); } } if ((conserve_mem_setting != 0) && (n == max_generation)) { float frag_limit = 1.0f - conserve_mem_setting / 10.0f; size_t loh_size = get_total_gen_size (loh_generation); size_t gen2_size = get_total_gen_size (max_generation); float loh_frag_ratio = 0.0f; float combined_frag_ratio = 0.0f; if (loh_size != 0) { size_t loh_frag = get_total_gen_fragmentation (loh_generation); size_t gen2_frag = get_total_gen_fragmentation (max_generation); loh_frag_ratio = (float)loh_frag / (float)loh_size; combined_frag_ratio = (float)(gen2_frag + loh_frag) / (float)(gen2_size + loh_size); } if (combined_frag_ratio > frag_limit) { dprintf (GTC_LOG, ("combined frag: %f > limit %f, loh frag: %f", combined_frag_ratio, frag_limit, loh_frag_ratio)); gc_data_global.gen_to_condemn_reasons.set_condition (gen_max_high_frag_p); n = max_generation; *blocking_collection_p = TRUE; if (loh_frag_ratio > frag_limit) { settings.loh_compaction = TRUE; dprintf (GTC_LOG, ("compacting LOH due to GCConserveMem setting")); } } } #ifdef BGC_SERVO_TUNING if (bgc_tuning::should_trigger_ngc2()) { gc_data_global.gen_to_condemn_reasons.set_condition(gen_joined_servo_ngc); n = max_generation; *blocking_collection_p = TRUE; } if ((n < max_generation) && !gc_heap::background_running_p() && bgc_tuning::stepping_trigger (settings.entry_memory_load, get_current_gc_index (max_generation))) { gc_data_global.gen_to_condemn_reasons.set_condition(gen_joined_servo_initial); n = max_generation; saved_bgc_tuning_reason = reason_bgc_stepping; } if ((n < max_generation) && bgc_tuning::should_trigger_bgc()) { gc_data_global.gen_to_condemn_reasons.set_condition(gen_joined_servo_bgc); n = max_generation; } if (n == (max_generation - 1)) { if (bgc_tuning::should_delay_alloc (max_generation)) { gc_data_global.gen_to_condemn_reasons.set_condition(gen_joined_servo_postpone); n -= 1; } } #endif //BGC_SERVO_TUNING if ((n == max_generation) && (*blocking_collection_p == FALSE)) { // If we are doing a gen2 we should reset elevation regardless and let the gen2 // decide if we should lock again or in the bgc case by design we will not retract // gen1 start. 
settings.should_lock_elevation = FALSE; settings.elevation_locked_count = 0; dprintf (GTC_LOG, ("doing bgc, reset elevation")); } #ifdef STRESS_HEAP #ifdef BACKGROUND_GC // We can only do Concurrent GC Stress if the caller did not explicitly ask for all // generations to be collected, // // [LOCALGC TODO] STRESS_HEAP is not defined for a standalone GC so there are multiple // things that need to be fixed in this code block. if (n_original != max_generation && g_pConfig->GetGCStressLevel() && gc_can_use_concurrent) { #ifndef FEATURE_REDHAWK if (*blocking_collection_p) { // We call StressHeap() a lot for Concurrent GC Stress. However, // if we can not do a concurrent collection, no need to stress anymore. // @TODO: Enable stress when the memory pressure goes down again GCStressPolicy::GlobalDisable(); } else #endif // !FEATURE_REDHAWK { gc_data_global.gen_to_condemn_reasons.set_condition(gen_joined_stress); n = max_generation; } } #endif //BACKGROUND_GC #endif //STRESS_HEAP #ifdef BACKGROUND_GC if ((n == max_generation) && background_running_p()) { n = max_generation - 1; dprintf (GTC_LOG, ("bgc in progress - 1 instead of 2")); } #endif //BACKGROUND_GC return n; } inline size_t get_survived_size (gc_history_per_heap* hist) { size_t surv_size = 0; gc_generation_data* gen_data; for (int gen_number = 0; gen_number < total_generation_count; gen_number++) { gen_data = &(hist->gen_data[gen_number]); surv_size += (gen_data->size_after - gen_data->free_list_space_after - gen_data->free_obj_space_after); } return surv_size; } size_t gc_heap::get_total_survived_size() { size_t total_surv_size = 0; #ifdef MULTIPLE_HEAPS for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; gc_history_per_heap* current_gc_data_per_heap = hp->get_gc_data_per_heap(); total_surv_size += get_survived_size (current_gc_data_per_heap); } #else gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap(); total_surv_size = get_survived_size (current_gc_data_per_heap); #endif //MULTIPLE_HEAPS return total_surv_size; } size_t gc_heap::get_total_allocated_since_last_gc() { size_t total_allocated_size = 0; #ifdef MULTIPLE_HEAPS for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; #else //MULTIPLE_HEAPS { gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS total_allocated_size += hp->allocated_since_last_gc[0] + hp->allocated_since_last_gc[1]; hp->allocated_since_last_gc[0] = 0; hp->allocated_since_last_gc[1] = 0; } return total_allocated_size; } // Gets what's allocated on both SOH, LOH, etc that hasn't been collected. size_t gc_heap::get_current_allocated() { dynamic_data* dd = dynamic_data_of (0); size_t current_alloc = dd_desired_allocation (dd) - dd_new_allocation (dd); for (int i = uoh_start_generation; i < total_generation_count; i++) { dynamic_data* dd = dynamic_data_of (i); current_alloc += dd_desired_allocation (dd) - dd_new_allocation (dd); } return current_alloc; } size_t gc_heap::get_total_allocated() { size_t total_current_allocated = 0; #ifdef MULTIPLE_HEAPS for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; total_current_allocated += hp->get_current_allocated(); } #else total_current_allocated = get_current_allocated(); #endif //MULTIPLE_HEAPS return total_current_allocated; } size_t gc_heap::get_total_promoted() { size_t total_promoted_size = 0; int highest_gen = ((settings.condemned_generation == max_generation) ? 
(total_generation_count - 1) : settings.condemned_generation); #ifdef MULTIPLE_HEAPS for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; #else //MULTIPLE_HEAPS { gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS for (int gen_number = 0; gen_number <= highest_gen; gen_number++) { total_promoted_size += dd_promoted_size (hp->dynamic_data_of (gen_number)); } } return total_promoted_size; } #ifdef BGC_SERVO_TUNING size_t gc_heap::get_total_generation_size (int gen_number) { size_t total_generation_size = 0; #ifdef MULTIPLE_HEAPS for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; #else //MULTIPLE_HEAPS { gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS total_generation_size += hp->generation_size (gen_number); } return total_generation_size; } // gets all that's allocated into the gen. This is only used for gen2/3 // for servo tuning. size_t gc_heap::get_total_servo_alloc (int gen_number) { size_t total_alloc = 0; #ifdef MULTIPLE_HEAPS for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; #else //MULTIPLE_HEAPS { gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS generation* gen = hp->generation_of (gen_number); total_alloc += generation_free_list_allocated (gen); total_alloc += generation_end_seg_allocated (gen); total_alloc += generation_condemned_allocated (gen); total_alloc += generation_sweep_allocated (gen); } return total_alloc; } size_t gc_heap::get_total_bgc_promoted() { size_t total_bgc_promoted = 0; #ifdef MULTIPLE_HEAPS int num_heaps = gc_heap::n_heaps; #else //MULTIPLE_HEAPS int num_heaps = 1; #endif //MULTIPLE_HEAPS for (int i = 0; i < num_heaps; i++) { total_bgc_promoted += bpromoted_bytes (i); } return total_bgc_promoted; } // This is called after compute_new_dynamic_data is called, at which point // dd_current_size is calculated. 
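// Same #ifdef MULTIPLE_HEAPS pattern as the helpers above: with multiple heaps
// the loop body runs once per g_heaps[i]; in the single heap build the same
// braces run exactly once against pGenGCHeap.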
size_t gc_heap::get_total_surv_size (int gen_number) { size_t total_surv_size = 0; #ifdef MULTIPLE_HEAPS for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; #else //MULTIPLE_HEAPS { gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS total_surv_size += dd_current_size (hp->dynamic_data_of (gen_number)); } return total_surv_size; } size_t gc_heap::get_total_begin_data_size (int gen_number) { size_t total_begin_data_size = 0; #ifdef MULTIPLE_HEAPS for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; #else //MULTIPLE_HEAPS { gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS total_begin_data_size += dd_begin_data_size (hp->dynamic_data_of (gen_number)); } return total_begin_data_size; } size_t gc_heap::get_total_generation_fl_size (int gen_number) { size_t total_generation_fl_size = 0; #ifdef MULTIPLE_HEAPS for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; #else //MULTIPLE_HEAPS { gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS total_generation_fl_size += generation_free_list_space (hp->generation_of (gen_number)); } return total_generation_fl_size; } size_t gc_heap::get_current_gc_index (int gen_number) { #ifdef MULTIPLE_HEAPS gc_heap* hp = gc_heap::g_heaps[0]; return dd_collection_count (hp->dynamic_data_of (gen_number)); #else return dd_collection_count (dynamic_data_of (gen_number)); #endif //MULTIPLE_HEAPS } #endif //BGC_SERVO_TUNING size_t gc_heap::current_generation_size (int gen_number) { dynamic_data* dd = dynamic_data_of (gen_number); size_t gen_size = (dd_current_size (dd) + dd_desired_allocation (dd) - dd_new_allocation (dd)); return gen_size; } #ifdef USE_REGIONS // We may need a new empty region while doing a GC so try to get one now, if we don't have any // reserve in the free region list. bool gc_heap::try_get_new_free_region() { heap_segment* region = 0; if (free_regions[basic_free_region].get_num_free_regions() > 0) { dprintf (REGIONS_LOG, ("h%d has %d free regions %Ix", heap_number, free_regions[basic_free_region].get_num_free_regions(), heap_segment_mem (free_regions[basic_free_region].get_first_free_region()))); return true; } else { region = allocate_new_region (__this, 0, false); if (region) { if (init_table_for_region (0, region)) { return_free_region (region); dprintf (REGIONS_LOG, ("h%d got a new empty region %Ix", heap_number, region)); } else { region = 0; } } } return (region != 0); } bool gc_heap::init_table_for_region (int gen_number, heap_segment* region) { #ifdef BACKGROUND_GC if (is_bgc_in_progress()) { dprintf (GC_TABLE_LOG, ("new seg %Ix, mark_array is %Ix", heap_segment_mem (region), mark_array)); if (!commit_mark_array_new_seg (__this, region)) { dprintf (GC_TABLE_LOG, ("failed to commit mark array for the new region %Ix-%Ix", get_region_start (region), heap_segment_reserved (region))); // We don't have memory to commit the mark array so we cannot use the new region. global_region_allocator.delete_region (get_region_start (region)); return false; } } #endif //BACKGROUND_GC if (gen_number <= max_generation) { size_t first_brick = brick_of (heap_segment_mem (region)); set_brick (first_brick, -1); } else { assert (brick_table[brick_of (heap_segment_mem (region))] == 0); } return true; } #endif //USE_REGIONS #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable:6326) // "Potential comparison of a constant with another constant" is intentional in this function. 
#endif //_PREFAST_ /* This is called by when we are actually doing a GC, or when we are just checking whether we would do a full blocking GC, in which case check_only_p is TRUE. The difference between calling this with check_only_p TRUE and FALSE is that when it's TRUE: settings.reason is ignored budgets are not checked (since they are checked before this is called) it doesn't change anything non local like generation_skip_ratio */ int gc_heap::generation_to_condemn (int n_initial, BOOL* blocking_collection_p, BOOL* elevation_requested_p, BOOL check_only_p) { gc_mechanisms temp_settings = settings; gen_to_condemn_tuning temp_condemn_reasons; gc_mechanisms* local_settings = (check_only_p ? &temp_settings : &settings); gen_to_condemn_tuning* local_condemn_reasons = (check_only_p ? &temp_condemn_reasons : &gen_to_condemn_reasons); if (!check_only_p) { if ((local_settings->reason == reason_oos_soh) || (local_settings->reason == reason_oos_loh)) { assert (n_initial >= 1); } assert (settings.reason != reason_empty); } local_condemn_reasons->init(); int n = n_initial; int n_alloc = n; if (heap_number == 0) { dprintf (GTC_LOG, ("init: %d(%d)", n_initial, settings.reason)); } int i = 0; int temp_gen = 0; BOOL low_memory_detected = g_low_memory_status; uint32_t memory_load = 0; uint64_t available_physical = 0; uint64_t available_page_file = 0; BOOL check_memory = FALSE; BOOL high_fragmentation = FALSE; BOOL v_high_memory_load = FALSE; BOOL high_memory_load = FALSE; BOOL low_ephemeral_space = FALSE; BOOL evaluate_elevation = TRUE; *elevation_requested_p = FALSE; *blocking_collection_p = FALSE; BOOL check_max_gen_alloc = TRUE; #ifdef STRESS_HEAP int orig_gen = n; #endif //STRESS_HEAP if (!check_only_p) { dd_fragmentation (dynamic_data_of (0)) = generation_free_list_space (youngest_generation) + generation_free_obj_space (youngest_generation); for (int i = uoh_start_generation; i < total_generation_count; i++) { dd_fragmentation (dynamic_data_of (i)) = generation_free_list_space (generation_of (i)) + generation_free_obj_space (generation_of (i)); } //save new_allocation for (i = 0; i < total_generation_count; i++) { dynamic_data* dd = dynamic_data_of (i); dprintf (GTC_LOG, ("h%d: g%d: l: %Id (%Id)", heap_number, i, dd_new_allocation (dd), dd_desired_allocation (dd))); dd_gc_new_allocation (dd) = dd_new_allocation (dd); } local_condemn_reasons->set_gen (gen_initial, n); temp_gen = n; #ifdef BACKGROUND_GC if (gc_heap::background_running_p() #ifdef BGC_SERVO_TUNING || bgc_tuning::fl_tuning_triggered || (bgc_tuning::enable_fl_tuning && bgc_tuning::use_stepping_trigger_p) #endif //BGC_SERVO_TUNING ) { check_max_gen_alloc = FALSE; } #endif //BACKGROUND_GC if (check_max_gen_alloc) { //figure out if UOH objects need to be collected. for (int i = uoh_start_generation; i < total_generation_count; i++) { if (get_new_allocation (i) <= 0) { n = max_generation; local_condemn_reasons->set_gen (gen_alloc_budget, n); dprintf (BGC_TUNING_LOG, ("BTL[GTC]: trigger based on gen%d b: %Id", (i), get_new_allocation (i))); break; } } } //figure out which generation ran out of allocation for (i = n+1; i <= (check_max_gen_alloc ? 
max_generation : (max_generation - 1)); i++) { if (get_new_allocation (i) <= 0) { n = i; if (n == max_generation) { dprintf (BGC_TUNING_LOG, ("BTL[GTC]: trigger based on gen2 b: %Id", get_new_allocation (max_generation))); } } else break; } } if (n > temp_gen) { local_condemn_reasons->set_gen (gen_alloc_budget, n); } dprintf (GTC_LOG, ("h%d: g%d budget", heap_number, ((get_new_allocation (loh_generation) <= 0) ? 3 : n))); n_alloc = n; #if defined(BACKGROUND_GC) && !defined(MULTIPLE_HEAPS) //time based tuning // if enough time has elapsed since the last gc // and the number of gc is too low (1/10 of lower gen) then collect // This should also be enabled if we have memory concerns int n_time_max = max_generation; if (!check_only_p) { if (!check_max_gen_alloc) { n_time_max = max_generation - 1; } } if ((local_settings->pause_mode == pause_interactive) || (local_settings->pause_mode == pause_sustained_low_latency)) { dynamic_data* dd0 = dynamic_data_of (0); uint64_t now = GetHighPrecisionTimeStamp(); temp_gen = n; for (i = (temp_gen+1); i <= n_time_max; i++) { dynamic_data* dd = dynamic_data_of (i); if ((now > dd_time_clock(dd) + dd_time_clock_interval(dd)) && (dd_gc_clock (dd0) > (dd_gc_clock (dd) + dd_gc_clock_interval(dd))) && ((n < max_generation) || ((dd_current_size (dd) < dd_max_size (dd0))))) { n = min (i, n_time_max); dprintf (GTC_LOG, ("time %d", n)); } } if (n > temp_gen) { local_condemn_reasons->set_gen (gen_time_tuning, n); if (n == max_generation) { dprintf (BGC_TUNING_LOG, ("BTL[GTC]: trigger based on time")); } } } if (n != n_alloc) { dprintf (GTC_LOG, ("Condemning %d based on time tuning and fragmentation", n)); } #endif //BACKGROUND_GC && !MULTIPLE_HEAPS if (n < (max_generation - 1)) { if (dt_low_card_table_efficiency_p (tuning_deciding_condemned_gen)) { n = max (n, max_generation - 1); local_settings->promotion = TRUE; dprintf (GTC_LOG, ("h%d: skip %d, c %d", heap_number, generation_skip_ratio, n)); local_condemn_reasons->set_condition (gen_low_card_p); } } if (!check_only_p) { generation_skip_ratio = 100; } if (dt_low_ephemeral_space_p (check_only_p ? tuning_deciding_full_gc : tuning_deciding_condemned_gen)) { low_ephemeral_space = TRUE; n = max (n, max_generation - 1); local_condemn_reasons->set_condition (gen_low_ephemeral_p); dprintf (GTC_LOG, ("h%d: low eph", heap_number)); if (!provisional_mode_triggered) { #ifdef BACKGROUND_GC if (!gc_can_use_concurrent || (generation_free_list_space (generation_of (max_generation)) == 0)) #endif //BACKGROUND_GC { //It is better to defragment first if we are running out of space for //the ephemeral generation but we have enough fragmentation to make up for it //in the non ephemeral generation. Essentially we are trading a gen2 for // having to expand heap in ephemeral collections. 
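// dt_high_frag_p on (max_generation - 1) below decides whether that trade-off applies; if it does, high_fragmentation is set and the gen_max_high_frag_e_p condition is recorded.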
if (dt_high_frag_p (tuning_deciding_condemned_gen, max_generation - 1, TRUE)) { high_fragmentation = TRUE; local_condemn_reasons->set_condition (gen_max_high_frag_e_p); dprintf (GTC_LOG, ("heap%d: gen1 frag", heap_number)); } } } } #ifdef USE_REGIONS if (!try_get_new_free_region()) { dprintf (GTC_LOG, ("can't get an empty region -> full compacting")); last_gc_before_oom = TRUE; } #endif //USE_REGIONS //figure out which ephemeral generation is too fragmented temp_gen = n; for (i = n+1; i < max_generation; i++) { if (dt_high_frag_p (tuning_deciding_condemned_gen, i)) { dprintf (GTC_LOG, ("h%d g%d too frag", heap_number, i)); n = i; } else break; } if (low_ephemeral_space) { //enable promotion local_settings->promotion = TRUE; } if (n > temp_gen) { local_condemn_reasons->set_condition (gen_eph_high_frag_p); } if (!check_only_p) { if (settings.pause_mode == pause_low_latency) { if (!is_induced (settings.reason)) { n = min (n, max_generation - 1); dprintf (GTC_LOG, ("low latency mode is enabled, condemning %d", n)); evaluate_elevation = FALSE; goto exit; } } } // It's hard to catch when we get to the point that the memory load is so high // we get an induced GC from the finalizer thread so we are checking the memory load // for every gen0 GC. check_memory = (check_only_p ? (n >= 0) : ((n >= 1) || low_memory_detected)); if (check_memory) { //find out if we are short on memory get_memory_info (&memory_load, &available_physical, &available_page_file); if (heap_number == 0) { dprintf (GTC_LOG, ("ml: %d", memory_load)); } #ifdef USE_REGIONS // For regions we want to take the VA range into consideration as well. uint32_t va_memory_load = global_region_allocator.get_va_memory_load(); if (heap_number == 0) { dprintf (GTC_LOG, ("h%d ML %d, va ML %d", heap_number, memory_load, va_memory_load)); } memory_load = max (memory_load, va_memory_load); #endif //USE_REGIONS // Need to get it early enough for all heaps to use. local_settings->entry_available_physical_mem = available_physical; local_settings->entry_memory_load = memory_load; // @TODO: Force compaction more often under GCSTRESS if (memory_load >= high_memory_load_th || low_memory_detected) { #ifdef SIMPLE_DPRINTF // stress log can't handle any parameter that's bigger than a void*. if (heap_number == 0) { dprintf (GTC_LOG, ("tp: %I64d, ap: %I64d", total_physical_mem, available_physical)); } #endif //SIMPLE_DPRINTF high_memory_load = TRUE; if (memory_load >= v_high_memory_load_th || low_memory_detected) { // TODO: Perhaps in 64-bit we should be estimating gen1's fragmentation as well since // gen1/gen0 may take a lot more memory than gen2. 
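// In the very-high-load (or low-memory) case below we estimate reclaimable gen2 space directly; in the merely-high-load case we estimate gen2 fragmentation against the available physical memory.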
if (!high_fragmentation) { high_fragmentation = dt_estimate_reclaim_space_p (tuning_deciding_condemned_gen, max_generation); } v_high_memory_load = TRUE; } else { if (!high_fragmentation) { high_fragmentation = dt_estimate_high_frag_p (tuning_deciding_condemned_gen, max_generation, available_physical); } } if (high_fragmentation) { if (high_memory_load) { local_condemn_reasons->set_condition (gen_max_high_frag_m_p); } else if (v_high_memory_load) { local_condemn_reasons->set_condition (gen_max_high_frag_vm_p); } } } } dprintf (GTC_LOG, ("h%d: le: %d, hm: %d, vm: %d, f: %d", heap_number, low_ephemeral_space, high_memory_load, v_high_memory_load, high_fragmentation)); if (should_expand_in_full_gc) { dprintf (GTC_LOG, ("h%d: expand_in_full - BLOCK", heap_number)); *blocking_collection_p = TRUE; evaluate_elevation = FALSE; n = max_generation; local_condemn_reasons->set_condition (gen_expand_fullgc_p); } if (last_gc_before_oom) { dprintf (GTC_LOG, ("h%d: alloc full - BLOCK", heap_number)); n = max_generation; *blocking_collection_p = TRUE; if ((local_settings->reason == reason_oos_loh) || (local_settings->reason == reason_alloc_loh)) { evaluate_elevation = FALSE; } local_condemn_reasons->set_condition (gen_before_oom); } if (!check_only_p) { if (is_induced_blocking (settings.reason) && n_initial == max_generation IN_STRESS_HEAP( && !settings.stress_induced )) { if (heap_number == 0) { dprintf (GTC_LOG, ("induced - BLOCK")); } *blocking_collection_p = TRUE; local_condemn_reasons->set_condition (gen_induced_fullgc_p); evaluate_elevation = FALSE; } if (settings.reason == reason_induced_noforce) { local_condemn_reasons->set_condition (gen_induced_noforce_p); evaluate_elevation = FALSE; } } if (!provisional_mode_triggered && evaluate_elevation && (low_ephemeral_space || high_memory_load || v_high_memory_load)) { *elevation_requested_p = TRUE; #ifdef HOST_64BIT // if we are in high memory load and have consumed 10% of the gen2 budget, do a gen2 now. if (high_memory_load || v_high_memory_load) { dynamic_data* dd_max = dynamic_data_of (max_generation); if (((float)dd_new_allocation (dd_max) / (float)dd_desired_allocation (dd_max)) < 0.9) { dprintf (GTC_LOG, ("%Id left in gen2 alloc (%Id)", dd_new_allocation (dd_max), dd_desired_allocation (dd_max))); n = max_generation; local_condemn_reasons->set_condition (gen_almost_max_alloc); } } if (n <= max_generation) { #endif // HOST_64BIT if (high_fragmentation) { //elevate to max_generation n = max_generation; dprintf (GTC_LOG, ("h%d: f full", heap_number)); #ifdef BACKGROUND_GC if (high_memory_load || v_high_memory_load) { // For background GC we want to do blocking collections more eagerly because we don't // want to get into the situation where the memory load becomes high while we are in // a background GC and we'd have to wait for the background GC to finish to start // a blocking collection (right now the implemenation doesn't handle converting // a background GC to a blocking collection midway. 
dprintf (GTC_LOG, ("h%d: bgc - BLOCK", heap_number)); *blocking_collection_p = TRUE; } #else if (v_high_memory_load) { dprintf (GTC_LOG, ("h%d: - BLOCK", heap_number)); *blocking_collection_p = TRUE; } #endif //BACKGROUND_GC } else { n = max (n, max_generation - 1); dprintf (GTC_LOG, ("h%d: nf c %d", heap_number, n)); } #ifdef HOST_64BIT } #endif // HOST_64BIT } if (!provisional_mode_triggered && (n == (max_generation - 1)) && (n_alloc < (max_generation -1))) { #ifdef BGC_SERVO_TUNING if (!bgc_tuning::enable_fl_tuning) #endif //BGC_SERVO_TUNING { dprintf (GTC_LOG, ("h%d: budget %d, check 2", heap_number, n_alloc)); if (get_new_allocation (max_generation) <= 0) { dprintf (GTC_LOG, ("h%d: budget alloc", heap_number)); n = max_generation; local_condemn_reasons->set_condition (gen_max_gen1); } } } //figure out if max_generation is too fragmented -> blocking collection if (!provisional_mode_triggered #ifdef BGC_SERVO_TUNING && !bgc_tuning::enable_fl_tuning #endif //BGC_SERVO_TUNING && (n == max_generation)) { if (dt_high_frag_p (tuning_deciding_condemned_gen, n)) { dprintf (GTC_LOG, ("h%d: g%d too frag", heap_number, n)); local_condemn_reasons->set_condition (gen_max_high_frag_p); if (local_settings->pause_mode != pause_sustained_low_latency) { *blocking_collection_p = TRUE; } } } #ifdef BACKGROUND_GC if ((n == max_generation) && !(*blocking_collection_p)) { if (heap_number == 0) { BOOL bgc_heap_too_small = TRUE; size_t gen2size = 0; size_t gen3size = 0; #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { if (((g_heaps[i]->current_generation_size (max_generation)) > bgc_min_per_heap) || ((g_heaps[i]->current_generation_size (loh_generation)) > bgc_min_per_heap) || ((g_heaps[i]->current_generation_size (poh_generation)) > bgc_min_per_heap)) { bgc_heap_too_small = FALSE; break; } } #else //MULTIPLE_HEAPS if ((current_generation_size (max_generation) > bgc_min_per_heap) || (current_generation_size (loh_generation) > bgc_min_per_heap) || (current_generation_size (poh_generation) > bgc_min_per_heap)) { bgc_heap_too_small = FALSE; } #endif //MULTIPLE_HEAPS if (bgc_heap_too_small) { dprintf (GTC_LOG, ("gen2 and gen3 too small")); #ifdef STRESS_HEAP // do not turn stress-induced collections into blocking GCs if (!settings.stress_induced) #endif //STRESS_HEAP { *blocking_collection_p = TRUE; } local_condemn_reasons->set_condition (gen_gen2_too_small); } } } #endif //BACKGROUND_GC exit: if (!check_only_p) { #ifdef STRESS_HEAP #ifdef BACKGROUND_GC // We can only do Concurrent GC Stress if the caller did not explicitly ask for all // generations to be collected, if (orig_gen != max_generation && g_pConfig->GetGCStressLevel() && gc_can_use_concurrent) { *elevation_requested_p = FALSE; } #endif //BACKGROUND_GC #endif //STRESS_HEAP if (check_memory) { fgm_result.available_pagefile_mb = (size_t)(available_page_file / (1024 * 1024)); } local_condemn_reasons->set_gen (gen_final_per_heap, n); get_gc_data_per_heap()->gen_to_condemn_reasons.init (local_condemn_reasons); #ifdef DT_LOG local_condemn_reasons->print (heap_number); #endif //DT_LOG if ((local_settings->reason == reason_oos_soh) || (local_settings->reason == reason_oos_loh)) { assert (n >= 1); } } return n; } #ifdef _PREFAST_ #pragma warning(pop) #endif //_PREFAST_ inline size_t gc_heap::min_reclaim_fragmentation_threshold (uint32_t num_heaps) { // if the memory load is higher, the threshold we'd want to collect gets lower. 
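// i.e. (500MB minus 40MB for every point of memory load above high_memory_load_th), split evenly across the heaps; the returned threshold is further capped by 10% of this heap's gen2 size and by 3% of total physical memory divided across the heaps.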
size_t min_mem_based_on_available = (500 - (settings.entry_memory_load - high_memory_load_th) * 40) * 1024 * 1024 / num_heaps; size_t ten_percent_size = (size_t)((float)generation_size (max_generation) * 0.10); uint64_t three_percent_mem = mem_one_percent * 3 / num_heaps; #ifdef SIMPLE_DPRINTF dprintf (GTC_LOG, ("min av: %Id, 10%% gen2: %Id, 3%% mem: %I64d", min_mem_based_on_available, ten_percent_size, three_percent_mem)); #endif //SIMPLE_DPRINTF return (size_t)(min (min_mem_based_on_available, min (ten_percent_size, three_percent_mem))); } inline uint64_t gc_heap::min_high_fragmentation_threshold(uint64_t available_mem, uint32_t num_heaps) { return min (available_mem, (256*1024*1024)) / num_heaps; } enum { CORINFO_EXCEPTION_GC = 0xE0004743 // 'GC' }; #ifdef BACKGROUND_GC void gc_heap::init_background_gc () { //reset the allocation so foreground gc can allocate into older (max_generation) generation generation* gen = generation_of (max_generation); generation_allocation_pointer (gen)= 0; generation_allocation_limit (gen) = 0; generation_allocation_segment (gen) = heap_segment_rw (generation_start_segment (gen)); PREFIX_ASSUME(generation_allocation_segment(gen) != NULL); #ifdef DOUBLY_LINKED_FL generation_set_bgc_mark_bit_p (gen) = FALSE; #endif //DOUBLY_LINKED_FL #ifndef USE_REGIONS //reset the plan allocation for each segment for (heap_segment* seg = generation_allocation_segment (gen); seg != ephemeral_heap_segment; seg = heap_segment_next_rw (seg)) { heap_segment_plan_allocated (seg) = heap_segment_allocated (seg); } #endif //!USE_REGIONS if (heap_number == 0) { dprintf (2, ("heap%d: bgc lowest: %Ix, highest: %Ix", heap_number, background_saved_lowest_address, background_saved_highest_address)); } } #endif //BACKGROUND_GC inline void fire_drain_mark_list_event (size_t mark_list_objects) { FIRE_EVENT(BGCDrainMark, mark_list_objects); } inline void fire_revisit_event (size_t dirtied_pages, size_t marked_objects, BOOL large_objects_p) { FIRE_EVENT(BGCRevisit, dirtied_pages, marked_objects, large_objects_p); } inline void fire_overflow_event (uint8_t* overflow_min, uint8_t* overflow_max, size_t marked_objects, int gen_number) { FIRE_EVENT(BGCOverflow_V1, (uint64_t)overflow_min, (uint64_t)overflow_max, marked_objects, gen_number == loh_generation, gen_number); } void gc_heap::concurrent_print_time_delta (const char* msg) { #ifdef TRACE_GC uint64_t current_time = GetHighPrecisionTimeStamp(); size_t elapsed_time_ms = (size_t)((current_time - time_bgc_last) / 1000); time_bgc_last = current_time; dprintf (2, ("h%d: %s T %Id ms", heap_number, msg, elapsed_time_ms)); #else UNREFERENCED_PARAMETER(msg); #endif //TRACE_GC } void gc_heap::free_list_info (int gen_num, const char* msg) { #if defined (BACKGROUND_GC) && defined (TRACE_GC) dprintf (3, ("h%d: %s", heap_number, msg)); for (int i = 0; i < total_generation_count; i++) { generation* gen = generation_of (i); if ((generation_allocation_size (gen) == 0) && (generation_free_list_space (gen) == 0) && (generation_free_obj_space (gen) == 0)) { // don't print if everything is 0. 
} else { dprintf (3, ("h%d: g%d: a-%Id, fl-%Id, fo-%Id", heap_number, i, generation_allocation_size (gen), generation_free_list_space (gen), generation_free_obj_space (gen))); } } #else UNREFERENCED_PARAMETER(gen_num); UNREFERENCED_PARAMETER(msg); #endif // BACKGROUND_GC && TRACE_GC } void gc_heap::update_collection_counts_for_no_gc() { assert (settings.pause_mode == pause_no_gc); settings.condemned_generation = max_generation; #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) g_heaps[i]->update_collection_counts(); #else //MULTIPLE_HEAPS update_collection_counts(); #endif //MULTIPLE_HEAPS full_gc_counts[gc_type_blocking]++; } BOOL gc_heap::should_proceed_with_gc() { if (gc_heap::settings.pause_mode == pause_no_gc) { if (current_no_gc_region_info.started) { // The no_gc mode was already in progress yet we triggered another GC, // this effectively exits the no_gc mode. restore_data_for_no_gc(); } else return should_proceed_for_no_gc(); } return TRUE; } void gc_heap::update_end_gc_time_per_heap() { for (int gen_number = 0; gen_number <= settings.condemned_generation; gen_number++) { dynamic_data* dd = dynamic_data_of (gen_number); dd_gc_elapsed_time (dd) = (size_t)(end_gc_time - dd_time_clock (dd)); } } void gc_heap::update_end_ngc_time() { end_gc_time = GetHighPrecisionTimeStamp(); #ifdef HEAP_BALANCE_INSTRUMENTATION last_gc_end_time_us = end_gc_time; dprintf (HEAP_BALANCE_LOG, ("[GC#%Id-%Id-%Id]", settings.gc_index, (last_gc_end_time_us - dd_time_clock (dynamic_data_of (0))), dd_time_clock (dynamic_data_of (0)))); #endif //HEAP_BALANCE_INSTRUMENTATION } size_t gc_heap::exponential_smoothing (int gen, size_t collection_count, size_t desired_per_heap) { // to avoid spikes in mem usage due to short terms fluctuations in survivorship, // apply some smoothing. 
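// The update below is new_smoothed = desired/N + previous_smoothed * (N-1)/N, with N = min (3, collection_count).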
size_t smoothing = min(3, collection_count); size_t new_smoothed_desired_per_heap = desired_per_heap / smoothing + ((smoothed_desired_per_heap[gen] / smoothing) * (smoothing - 1)); dprintf (2, ("new smoothed_desired_per_heap for gen %d = %Id, desired_per_heap = %Id", gen, new_smoothed_desired_per_heap, desired_per_heap)); smoothed_desired_per_heap[gen] = new_smoothed_desired_per_heap; return Align (smoothed_desired_per_heap[gen], get_alignment_constant (gen <= soh_gen2)); } //internal part of gc used by the serial and concurrent version void gc_heap::gc1() { #ifdef BACKGROUND_GC assert (settings.concurrent == (uint32_t)(bgc_thread_id.IsCurrentThread())); #endif //BACKGROUND_GC verify_soh_segment_list(); int n = settings.condemned_generation; if (settings.reason == reason_pm_full_gc) { assert (n == max_generation); init_records(); gen_to_condemn_tuning* local_condemn_reasons = &(get_gc_data_per_heap()->gen_to_condemn_reasons); local_condemn_reasons->init(); local_condemn_reasons->set_gen (gen_initial, n); local_condemn_reasons->set_gen (gen_final_per_heap, n); } update_collection_counts (); #ifdef BACKGROUND_GC bgc_alloc_lock->check(); #endif //BACKGROUND_GC free_list_info (max_generation, "beginning"); vm_heap->GcCondemnedGeneration = settings.condemned_generation; assert (g_gc_card_table == card_table); #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES assert (g_gc_card_bundle_table == card_bundle_table); #endif { #ifndef USE_REGIONS if (n == max_generation) { gc_low = lowest_address; gc_high = highest_address; } else { gc_low = generation_allocation_start (generation_of (n)); gc_high = heap_segment_reserved (ephemeral_heap_segment); } #endif //USE_REGIONS #ifdef BACKGROUND_GC if (settings.concurrent) { #ifdef TRACE_GC time_bgc_last = GetHighPrecisionTimeStamp(); #endif //TRACE_GC FIRE_EVENT(BGCBegin); concurrent_print_time_delta ("BGC"); concurrent_print_time_delta ("RW"); background_mark_phase(); free_list_info (max_generation, "after mark phase"); background_sweep(); free_list_info (max_generation, "after sweep phase"); } else #endif //BACKGROUND_GC { mark_phase (n, FALSE); check_gen0_bricks(); GCScan::GcRuntimeStructuresValid (FALSE); plan_phase (n); GCScan::GcRuntimeStructuresValid (TRUE); check_gen0_bricks(); } } //adjust the allocation size from the pinned quantities. 
for (int gen_number = 0; gen_number <= min (max_generation,n+1); gen_number++) { generation* gn = generation_of (gen_number); if (settings.compaction) { generation_pinned_allocated (gn) += generation_pinned_allocation_compact_size (gn); generation_allocation_size (generation_of (gen_number)) += generation_pinned_allocation_compact_size (gn); } else { generation_pinned_allocated (gn) += generation_pinned_allocation_sweep_size (gn); generation_allocation_size (generation_of (gen_number)) += generation_pinned_allocation_sweep_size (gn); } generation_pinned_allocation_sweep_size (gn) = 0; generation_pinned_allocation_compact_size (gn) = 0; } #ifdef BACKGROUND_GC if (settings.concurrent) { dynamic_data* dd = dynamic_data_of (n); end_gc_time = GetHighPrecisionTimeStamp(); dd_gc_elapsed_time (dd) = (size_t)(end_gc_time - dd_time_clock (dd)); #ifdef HEAP_BALANCE_INSTRUMENTATION if (heap_number == 0) { last_gc_end_time_us = end_gc_time; dprintf (HEAP_BALANCE_LOG, ("[GC#%Id-%Id-BGC]", settings.gc_index, dd_gc_elapsed_time (dd))); } #endif //HEAP_BALANCE_INSTRUMENTATION free_list_info (max_generation, "after computing new dynamic data"); gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap(); for (int gen_number = 0; gen_number < max_generation; gen_number++) { dprintf (2, ("end of BGC: gen%d new_alloc: %Id", gen_number, dd_desired_allocation (dynamic_data_of (gen_number)))); current_gc_data_per_heap->gen_data[gen_number].size_after = generation_size (gen_number); current_gc_data_per_heap->gen_data[gen_number].free_list_space_after = generation_free_list_space (generation_of (gen_number)); current_gc_data_per_heap->gen_data[gen_number].free_obj_space_after = generation_free_obj_space (generation_of (gen_number)); } } else #endif //BACKGROUND_GC { free_list_info (max_generation, "end"); for (int gen_number = 0; gen_number <= n; gen_number++) { compute_new_dynamic_data (gen_number); } if (n != max_generation) { for (int gen_number = (n + 1); gen_number < total_generation_count; gen_number++) { get_gc_data_per_heap()->gen_data[gen_number].size_after = generation_size (gen_number); get_gc_data_per_heap()->gen_data[gen_number].free_list_space_after = generation_free_list_space (generation_of (gen_number)); get_gc_data_per_heap()->gen_data[gen_number].free_obj_space_after = generation_free_obj_space (generation_of (gen_number)); } } get_gc_data_per_heap()->maxgen_size_info.running_free_list_efficiency = (uint32_t)(generation_allocator_efficiency (generation_of (max_generation)) * 100); free_list_info (max_generation, "after computing new dynamic data"); } if (n < max_generation) { int highest_gen_number = #ifdef USE_REGIONS max_generation; #else //USE_REGIONS 1 + n; #endif //USE_REGIONS for (int older_gen_idx = (1 + n); older_gen_idx <= highest_gen_number; older_gen_idx++) { compute_promoted_allocation (older_gen_idx); dynamic_data* dd = dynamic_data_of (older_gen_idx); size_t new_fragmentation = generation_free_list_space (generation_of (older_gen_idx)) + generation_free_obj_space (generation_of (older_gen_idx)); #ifdef BACKGROUND_GC if (current_c_gc_state != c_gc_state_planning) #endif //BACKGROUND_GC { if (settings.promotion) { dd_fragmentation (dd) = new_fragmentation; } else { //assert (dd_fragmentation (dd) == new_fragmentation); } } } } #ifdef BACKGROUND_GC if (!settings.concurrent) #endif //BACKGROUND_GC { #ifndef FEATURE_REDHAWK // GCToEEInterface::IsGCThread() always returns false on CoreRT, but this assert is useful in CoreCLR. 
assert(GCToEEInterface::IsGCThread()); #endif // FEATURE_REDHAWK adjust_ephemeral_limits(); } #if defined(BACKGROUND_GC) && !defined(USE_REGIONS) assert (ephemeral_low == generation_allocation_start (generation_of ( max_generation -1))); assert (ephemeral_high == heap_segment_reserved (ephemeral_heap_segment)); #endif //BACKGROUND_GC && !USE_REGIONS if (fgn_maxgen_percent) { if (settings.condemned_generation == (max_generation - 1)) { check_for_full_gc (max_generation - 1, 0); } else if (settings.condemned_generation == max_generation) { if (full_gc_approach_event_set #ifdef MULTIPLE_HEAPS && (heap_number == 0) #endif //MULTIPLE_HEAPS ) { dprintf (2, ("FGN-GC: setting gen2 end event")); full_gc_approach_event.Reset(); #ifdef BACKGROUND_GC // By definition WaitForFullGCComplete only succeeds if it's full, *blocking* GC, otherwise need to return N/A fgn_last_gc_was_concurrent = settings.concurrent ? TRUE : FALSE; #endif //BACKGROUND_GC full_gc_end_event.Set(); full_gc_approach_event_set = false; } } } #ifdef BACKGROUND_GC if (!settings.concurrent) #endif //BACKGROUND_GC { //decide on the next allocation quantum if (alloc_contexts_used >= 1) { allocation_quantum = Align (min ((size_t)CLR_SIZE, (size_t)max (1024, get_new_allocation (0) / (2 * alloc_contexts_used))), get_alignment_constant(FALSE)); dprintf (3, ("New allocation quantum: %d(0x%Ix)", allocation_quantum, allocation_quantum)); } } descr_generations ("END"); verify_soh_segment_list(); #ifdef BACKGROUND_GC if (gc_can_use_concurrent) { check_bgc_mark_stack_length(); } assert (settings.concurrent == (uint32_t)(bgc_thread_id.IsCurrentThread())); #endif //BACKGROUND_GC #if defined(VERIFY_HEAP) || (defined (FEATURE_EVENT_TRACE) && defined(BACKGROUND_GC)) if (FALSE #ifdef VERIFY_HEAP // Note that right now g_pConfig->GetHeapVerifyLevel always returns the same // value. If we ever allow randomly adjusting this as the process runs, // we cannot call it this way as joins need to match - we must have the same // value for all heaps like we do with bgc_heap_walk_for_etw_p. || (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) #endif #if defined(FEATURE_EVENT_TRACE) && defined(BACKGROUND_GC) || (bgc_heap_walk_for_etw_p && settings.concurrent) #endif ) { #ifdef BACKGROUND_GC bool cooperative_mode = true; if (settings.concurrent) { cooperative_mode = enable_preemptive (); #ifdef MULTIPLE_HEAPS bgc_t_join.join(this, gc_join_suspend_ee_verify); if (bgc_t_join.joined()) { bgc_threads_sync_event.Reset(); dprintf(2, ("Joining BGC threads to suspend EE for verify heap")); bgc_t_join.restart(); } if (heap_number == 0) { // need to take the gc_lock in preparation for verify_heap below // *before* we suspend the EE, otherwise we get a deadlock enter_gc_lock_for_verify_heap(); suspend_EE(); bgc_threads_sync_event.Set(); } else { bgc_threads_sync_event.Wait(INFINITE, FALSE); dprintf (2, ("bgc_threads_sync_event is signalled")); } #else //MULTIPLE_HEAPS // need to take the gc_lock in preparation for verify_heap below // *before* we suspend the EE, otherwise we get a deadlock enter_gc_lock_for_verify_heap(); suspend_EE(); #endif //MULTIPLE_HEAPS //fix the allocation area so verify_heap can proceed. 
fix_allocation_contexts (FALSE); } #endif //BACKGROUND_GC #ifdef BACKGROUND_GC assert (settings.concurrent == (uint32_t)(bgc_thread_id.IsCurrentThread())); #ifdef FEATURE_EVENT_TRACE if (bgc_heap_walk_for_etw_p && settings.concurrent) { GCToEEInterface::DiagWalkBGCSurvivors(__this); #ifdef MULTIPLE_HEAPS bgc_t_join.join(this, gc_join_after_profiler_heap_walk); if (bgc_t_join.joined()) { bgc_t_join.restart(); } #endif // MULTIPLE_HEAPS } #endif // FEATURE_EVENT_TRACE #endif //BACKGROUND_GC #ifdef VERIFY_HEAP if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) verify_heap (FALSE); #endif // VERIFY_HEAP #ifdef BACKGROUND_GC if (settings.concurrent) { repair_allocation_contexts (TRUE); #ifdef MULTIPLE_HEAPS bgc_t_join.join(this, gc_join_restart_ee_verify); if (bgc_t_join.joined()) { bgc_threads_sync_event.Reset(); dprintf(2, ("Joining BGC threads to restart EE after verify heap")); bgc_t_join.restart(); } if (heap_number == 0) { restart_EE(); leave_gc_lock_for_verify_heap(); bgc_threads_sync_event.Set(); } else { bgc_threads_sync_event.Wait(INFINITE, FALSE); dprintf (2, ("bgc_threads_sync_event is signalled")); } #else //MULTIPLE_HEAPS restart_EE(); leave_gc_lock_for_verify_heap(); #endif //MULTIPLE_HEAPS disable_preemptive (cooperative_mode); } #endif //BACKGROUND_GC } #endif //VERIFY_HEAP || (FEATURE_EVENT_TRACE && BACKGROUND_GC) #ifdef MULTIPLE_HEAPS if (!settings.concurrent) { gc_t_join.join(this, gc_join_done); if (gc_t_join.joined ()) { gc_heap::internal_gc_done = false; //equalize the new desired size of the generations int limit = settings.condemned_generation; if (limit == max_generation) { limit = total_generation_count-1; } for (int gen = 0; gen <= limit; gen++) { size_t total_desired = 0; size_t total_already_consumed = 0; for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; dynamic_data* dd = hp->dynamic_data_of (gen); size_t temp_total_desired = total_desired + dd_desired_allocation (dd); if (temp_total_desired < total_desired) { // we overflowed. total_desired = (size_t)MAX_PTR; break; } total_desired = temp_total_desired; // for gen 1 and gen 2, there may have been some incoming size // already accounted for assert ((ptrdiff_t)dd_desired_allocation (dd) >= dd_new_allocation (dd)); size_t already_consumed = dd_desired_allocation (dd) - dd_new_allocation (dd); size_t temp_total_already_consumed = total_already_consumed + already_consumed; // we should never have an overflow here as the consumed size should always fit in a size_t assert (temp_total_already_consumed >= total_already_consumed); total_already_consumed = temp_total_already_consumed; } size_t desired_per_heap = Align (total_desired/gc_heap::n_heaps, get_alignment_constant (gen <= max_generation)); size_t already_consumed_per_heap = total_already_consumed / gc_heap::n_heaps; if (gen == 0) { #if 1 //subsumed by the linear allocation model // to avoid spikes in mem usage due to short terms fluctuations in survivorship, // apply some smoothing. 
desired_per_heap = exponential_smoothing (gen, dd_collection_count (dynamic_data_of(gen)), desired_per_heap); #endif //0 if (!heap_hard_limit) { // if desired_per_heap is close to min_gc_size, trim it // down to min_gc_size to stay in the cache gc_heap* hp = gc_heap::g_heaps[0]; dynamic_data* dd = hp->dynamic_data_of (gen); size_t min_gc_size = dd_min_size(dd); // if min GC size larger than true on die cache, then don't bother // limiting the desired size if ((min_gc_size <= GCToOSInterface::GetCacheSizePerLogicalCpu(TRUE)) && desired_per_heap <= 2*min_gc_size) { desired_per_heap = min_gc_size; } } #ifdef HOST_64BIT desired_per_heap = joined_youngest_desired (desired_per_heap); dprintf (2, ("final gen0 new_alloc: %Id", desired_per_heap)); #endif // HOST_64BIT gc_data_global.final_youngest_desired = desired_per_heap; } #if 1 //subsumed by the linear allocation model if (gen >= uoh_start_generation) { // to avoid spikes in mem usage due to short terms fluctuations in survivorship, // apply some smoothing. desired_per_heap = exponential_smoothing (gen, dd_collection_count (dynamic_data_of (max_generation)), desired_per_heap); } #endif //0 for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; dynamic_data* dd = hp->dynamic_data_of (gen); dd_desired_allocation (dd) = desired_per_heap; dd_gc_new_allocation (dd) = desired_per_heap; dd_new_allocation (dd) = desired_per_heap - already_consumed_per_heap; if (gen == 0) { hp->fgn_last_alloc = desired_per_heap; } } } #ifdef FEATURE_LOH_COMPACTION BOOL all_heaps_compacted_p = TRUE; #endif //FEATURE_LOH_COMPACTION int max_gen0_must_clear_bricks = 0; for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; hp->decommit_ephemeral_segment_pages(); hp->rearrange_uoh_segments(); #ifdef FEATURE_LOH_COMPACTION all_heaps_compacted_p &= hp->loh_compacted_p; #endif //FEATURE_LOH_COMPACTION // compute max of gen0_must_clear_bricks over all heaps max_gen0_must_clear_bricks = max(max_gen0_must_clear_bricks, hp->gen0_must_clear_bricks); } #ifdef USE_REGIONS distribute_free_regions(); #endif //USE_REGIONS #ifdef FEATURE_LOH_COMPACTION check_loh_compact_mode (all_heaps_compacted_p); #endif //FEATURE_LOH_COMPACTION // if max_gen0_must_clear_bricks > 0, distribute to all heaps - // if one heap encountered an interior pointer during this GC, // the next GC might see one on another heap if (max_gen0_must_clear_bricks > 0) { for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; hp->gen0_must_clear_bricks = max_gen0_must_clear_bricks; } } for (int i = 0; i < gc_heap::n_heaps; i++) { g_heaps[i]->descr_generations ("END"); #ifdef USE_REGIONS if (settings.condemned_generation == max_generation) { // age and print all kinds of free regions region_free_list::age_free_regions (g_heaps[i]->free_regions); region_free_list::print (g_heaps[i]->free_regions, i, "END"); } else { // age and print only basic free regions g_heaps[i]->free_regions[basic_free_region].age_free_regions(); g_heaps[i]->free_regions[basic_free_region].print (i, "END"); } #endif //USE_REGIONS } fire_pevents(); update_end_ngc_time(); pm_full_gc_init_or_clear(); gc_t_join.restart(); } update_end_gc_time_per_heap(); add_to_history_per_heap(); alloc_context_count = 0; heap_select::mark_heap (heap_number); } #else //MULTIPLE_HEAPS gc_data_global.final_youngest_desired = dd_desired_allocation (dynamic_data_of (0)); #ifdef FEATURE_LOH_COMPACTION check_loh_compact_mode (loh_compacted_p); #endif //FEATURE_LOH_COMPACTION 
decommit_ephemeral_segment_pages(); fire_pevents(); if (!(settings.concurrent)) { #ifdef USE_REGIONS distribute_free_regions(); if (settings.condemned_generation == max_generation) { // age and print all kinds of free regions region_free_list::age_free_regions(free_regions); region_free_list::print(free_regions, 0, "END"); } else { // age and print only basic free regions free_regions[basic_free_region].age_free_regions(); free_regions[basic_free_region].print (0, "END"); } #endif //USE_REGIONS rearrange_uoh_segments(); update_end_ngc_time(); update_end_gc_time_per_heap(); add_to_history_per_heap(); do_post_gc(); } pm_full_gc_init_or_clear(); #ifdef BACKGROUND_GC recover_bgc_settings(); #endif //BACKGROUND_GC #endif //MULTIPLE_HEAPS } void gc_heap::save_data_for_no_gc() { current_no_gc_region_info.saved_pause_mode = settings.pause_mode; #ifdef MULTIPLE_HEAPS // This is to affect heap balancing. for (int i = 0; i < n_heaps; i++) { current_no_gc_region_info.saved_gen0_min_size = dd_min_size (g_heaps[i]->dynamic_data_of (0)); dd_min_size (g_heaps[i]->dynamic_data_of (0)) = min_balance_threshold; current_no_gc_region_info.saved_gen3_min_size = dd_min_size (g_heaps[i]->dynamic_data_of (loh_generation)); dd_min_size (g_heaps[i]->dynamic_data_of (loh_generation)) = 0; } #endif //MULTIPLE_HEAPS } void gc_heap::restore_data_for_no_gc() { gc_heap::settings.pause_mode = current_no_gc_region_info.saved_pause_mode; #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { dd_min_size (g_heaps[i]->dynamic_data_of (0)) = current_no_gc_region_info.saved_gen0_min_size; dd_min_size (g_heaps[i]->dynamic_data_of (loh_generation)) = current_no_gc_region_info.saved_gen3_min_size; } #endif //MULTIPLE_HEAPS } start_no_gc_region_status gc_heap::prepare_for_no_gc_region (uint64_t total_size, BOOL loh_size_known, uint64_t loh_size, BOOL disallow_full_blocking) { if (current_no_gc_region_info.started) { return start_no_gc_in_progress; } start_no_gc_region_status status = start_no_gc_success; save_data_for_no_gc(); settings.pause_mode = pause_no_gc; current_no_gc_region_info.start_status = start_no_gc_success; uint64_t allocation_no_gc_loh = 0; uint64_t allocation_no_gc_soh = 0; assert(total_size != 0); if (loh_size_known) { assert(loh_size != 0); assert(loh_size <= total_size); allocation_no_gc_loh = loh_size; allocation_no_gc_soh = total_size - loh_size; } else { allocation_no_gc_soh = total_size; allocation_no_gc_loh = total_size; } int soh_align_const = get_alignment_constant (TRUE); #ifdef USE_REGIONS size_t max_soh_allocated = SIZE_T_MAX; #else size_t max_soh_allocated = soh_segment_size - segment_info_size - eph_gen_starts_size; #endif size_t size_per_heap = 0; const double scale_factor = 1.05; int num_heaps = get_num_heaps(); uint64_t total_allowed_soh_allocation = (uint64_t)max_soh_allocated * num_heaps; // [LOCALGC TODO] // In theory, the upper limit here is the physical memory of the machine, not // SIZE_T_MAX. This is not true today because total_physical_mem can be // larger than SIZE_T_MAX if running in wow64 on a machine with more than // 4GB of RAM. Once Local GC code divergence is resolved and code is flowing // more freely between branches, it would be good to clean this up to use // total_physical_mem instead of SIZE_T_MAX. assert(total_allowed_soh_allocation <= SIZE_T_MAX); uint64_t total_allowed_loh_allocation = SIZE_T_MAX; uint64_t total_allowed_soh_alloc_scaled = allocation_no_gc_soh > 0 ? 
static_cast<uint64_t>(total_allowed_soh_allocation / scale_factor) : 0; uint64_t total_allowed_loh_alloc_scaled = allocation_no_gc_loh > 0 ? static_cast<uint64_t>(total_allowed_loh_allocation / scale_factor) : 0; if (allocation_no_gc_soh > total_allowed_soh_alloc_scaled || allocation_no_gc_loh > total_allowed_loh_alloc_scaled) { status = start_no_gc_too_large; goto done; } if (allocation_no_gc_soh > 0) { allocation_no_gc_soh = static_cast<uint64_t>(allocation_no_gc_soh * scale_factor); allocation_no_gc_soh = min (allocation_no_gc_soh, total_allowed_soh_alloc_scaled); } if (allocation_no_gc_loh > 0) { allocation_no_gc_loh = static_cast<uint64_t>(allocation_no_gc_loh * scale_factor); allocation_no_gc_loh = min (allocation_no_gc_loh, total_allowed_loh_alloc_scaled); } if (disallow_full_blocking) current_no_gc_region_info.minimal_gc_p = TRUE; if (allocation_no_gc_soh != 0) { current_no_gc_region_info.soh_allocation_size = (size_t)allocation_no_gc_soh; size_per_heap = current_no_gc_region_info.soh_allocation_size; #ifdef MULTIPLE_HEAPS size_per_heap /= n_heaps; for (int i = 0; i < n_heaps; i++) { // due to heap balancing we need to allow some room before we even look to balance to another heap. g_heaps[i]->soh_allocation_no_gc = min (Align ((size_per_heap + min_balance_threshold), soh_align_const), max_soh_allocated); } #else //MULTIPLE_HEAPS soh_allocation_no_gc = min (Align (size_per_heap, soh_align_const), max_soh_allocated); #endif //MULTIPLE_HEAPS } if (allocation_no_gc_loh != 0) { current_no_gc_region_info.loh_allocation_size = (size_t)allocation_no_gc_loh; size_per_heap = current_no_gc_region_info.loh_allocation_size; #ifdef MULTIPLE_HEAPS size_per_heap /= n_heaps; for (int i = 0; i < n_heaps; i++) g_heaps[i]->loh_allocation_no_gc = Align (size_per_heap, get_alignment_constant (FALSE)); #else //MULTIPLE_HEAPS loh_allocation_no_gc = Align (size_per_heap, get_alignment_constant (FALSE)); #endif //MULTIPLE_HEAPS } done: if (status != start_no_gc_success) restore_data_for_no_gc(); return status; } void gc_heap::handle_failure_for_no_gc() { gc_heap::restore_data_for_no_gc(); // sets current_no_gc_region_info.started to FALSE here. 
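// (the memset below zeroes the whole no_gc bookkeeping struct, which is what actually resets the started flag after restore_data_for_no_gc has put the saved pause mode back)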
memset (&current_no_gc_region_info, 0, sizeof (current_no_gc_region_info)); } start_no_gc_region_status gc_heap::get_start_no_gc_region_status() { return current_no_gc_region_info.start_status; } void gc_heap::record_gcs_during_no_gc() { if (current_no_gc_region_info.started) { current_no_gc_region_info.num_gcs++; if (is_induced (settings.reason)) current_no_gc_region_info.num_gcs_induced++; } } BOOL gc_heap::find_loh_free_for_no_gc() { allocator* loh_allocator = generation_allocator (generation_of (loh_generation)); size_t size = loh_allocation_no_gc; for (unsigned int a_l_idx = loh_allocator->first_suitable_bucket(size); a_l_idx < loh_allocator->number_of_buckets(); a_l_idx++) { uint8_t* free_list = loh_allocator->alloc_list_head_of (a_l_idx); while (free_list) { size_t free_list_size = unused_array_size(free_list); if (free_list_size > size) { dprintf (3, ("free item %Ix(%Id) for no gc", (size_t)free_list, free_list_size)); return TRUE; } free_list = free_list_slot (free_list); } } return FALSE; } BOOL gc_heap::find_loh_space_for_no_gc() { saved_loh_segment_no_gc = 0; if (find_loh_free_for_no_gc()) return TRUE; heap_segment* seg = generation_allocation_segment (generation_of (loh_generation)); while (seg) { size_t remaining = heap_segment_reserved (seg) - heap_segment_allocated (seg); if (remaining >= loh_allocation_no_gc) { saved_loh_segment_no_gc = seg; break; } seg = heap_segment_next (seg); } if (!saved_loh_segment_no_gc && current_no_gc_region_info.minimal_gc_p) { // If no full GC is allowed, we try to get a new seg right away. saved_loh_segment_no_gc = get_segment_for_uoh (loh_generation, get_uoh_seg_size (loh_allocation_no_gc) #ifdef MULTIPLE_HEAPS , this #endif //MULTIPLE_HEAPS ); } return (saved_loh_segment_no_gc != 0); } BOOL gc_heap::loh_allocated_for_no_gc() { if (!saved_loh_segment_no_gc) return FALSE; heap_segment* seg = generation_allocation_segment (generation_of (loh_generation)); do { if (seg == saved_loh_segment_no_gc) { return FALSE; } seg = heap_segment_next (seg); } while (seg); return TRUE; } BOOL gc_heap::commit_loh_for_no_gc (heap_segment* seg) { uint8_t* end_committed = heap_segment_allocated (seg) + loh_allocation_no_gc; assert (end_committed <= heap_segment_reserved (seg)); return (grow_heap_segment (seg, end_committed)); } void gc_heap::thread_no_gc_loh_segments() { #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { gc_heap* hp = g_heaps[i]; if (hp->loh_allocated_for_no_gc()) { hp->thread_uoh_segment (loh_generation, hp->saved_loh_segment_no_gc); hp->saved_loh_segment_no_gc = 0; } } #else //MULTIPLE_HEAPS if (loh_allocated_for_no_gc()) { thread_uoh_segment (loh_generation, saved_loh_segment_no_gc); saved_loh_segment_no_gc = 0; } #endif //MULTIPLE_HEAPS } void gc_heap::set_loh_allocations_for_no_gc() { if (current_no_gc_region_info.loh_allocation_size != 0) { dynamic_data* dd = dynamic_data_of (loh_generation); dd_new_allocation (dd) = loh_allocation_no_gc; dd_gc_new_allocation (dd) = dd_new_allocation (dd); } } void gc_heap::set_soh_allocations_for_no_gc() { if (current_no_gc_region_info.soh_allocation_size != 0) { dynamic_data* dd = dynamic_data_of (0); dd_new_allocation (dd) = soh_allocation_no_gc; dd_gc_new_allocation (dd) = dd_new_allocation (dd); #ifdef MULTIPLE_HEAPS alloc_context_count = 0; #endif //MULTIPLE_HEAPS } } void gc_heap::set_allocations_for_no_gc() { #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { gc_heap* hp = g_heaps[i]; hp->set_loh_allocations_for_no_gc(); hp->set_soh_allocations_for_no_gc(); } #else //MULTIPLE_HEAPS 
set_loh_allocations_for_no_gc(); set_soh_allocations_for_no_gc(); #endif //MULTIPLE_HEAPS } BOOL gc_heap::should_proceed_for_no_gc() { BOOL gc_requested = FALSE; BOOL loh_full_gc_requested = FALSE; BOOL soh_full_gc_requested = FALSE; BOOL no_gc_requested = FALSE; BOOL get_new_loh_segments = FALSE; gc_heap* hp = nullptr; if (current_no_gc_region_info.soh_allocation_size) { #ifdef USE_REGIONS #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { hp = g_heaps[i]; #else { hp = pGenGCHeap; #endif //MULTIPLE_HEAPS if (!hp->extend_soh_for_no_gc()) { soh_full_gc_requested = TRUE; #ifdef MULTIPLE_HEAPS break; #endif //MULTIPLE_HEAPS } } #else //USE_REGIONS #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { hp = g_heaps[i]; #else //MULTIPLE_HEAPS { hp = pGenGCHeap; #endif //MULTIPLE_HEAPS size_t reserved_space = heap_segment_reserved (hp->ephemeral_heap_segment) - hp->alloc_allocated; if (reserved_space < hp->soh_allocation_no_gc) { gc_requested = TRUE; #ifdef MULTIPLE_HEAPS break; #endif //MULTIPLE_HEAPS } } if (!gc_requested) { #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { hp = g_heaps[i]; #else //MULTIPLE_HEAPS { hp = pGenGCHeap; #endif //MULTIPLE_HEAPS if (!(hp->grow_heap_segment (hp->ephemeral_heap_segment, (hp->alloc_allocated + hp->soh_allocation_no_gc)))) { soh_full_gc_requested = TRUE; #ifdef MULTIPLE_HEAPS break; #endif //MULTIPLE_HEAPS } } } #endif //USE_REGIONS } if (!current_no_gc_region_info.minimal_gc_p && gc_requested) { soh_full_gc_requested = TRUE; } no_gc_requested = !(soh_full_gc_requested || gc_requested); if (soh_full_gc_requested && current_no_gc_region_info.minimal_gc_p) { current_no_gc_region_info.start_status = start_no_gc_no_memory; goto done; } if (!soh_full_gc_requested && current_no_gc_region_info.loh_allocation_size) { // Check to see if we have enough reserved space. #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { gc_heap* hp = g_heaps[i]; if (!hp->find_loh_space_for_no_gc()) { loh_full_gc_requested = TRUE; break; } } #else //MULTIPLE_HEAPS if (!find_loh_space_for_no_gc()) loh_full_gc_requested = TRUE; #endif //MULTIPLE_HEAPS // Check to see if we have committed space. if (!loh_full_gc_requested) { #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { gc_heap* hp = g_heaps[i]; if (hp->saved_loh_segment_no_gc &&!hp->commit_loh_for_no_gc (hp->saved_loh_segment_no_gc)) { loh_full_gc_requested = TRUE; break; } } #else //MULTIPLE_HEAPS if (saved_loh_segment_no_gc && !commit_loh_for_no_gc (saved_loh_segment_no_gc)) loh_full_gc_requested = TRUE; #endif //MULTIPLE_HEAPS } } if (loh_full_gc_requested || soh_full_gc_requested) { if (current_no_gc_region_info.minimal_gc_p) current_no_gc_region_info.start_status = start_no_gc_no_memory; } no_gc_requested = !(loh_full_gc_requested || soh_full_gc_requested || gc_requested); if (current_no_gc_region_info.start_status == start_no_gc_success) { if (no_gc_requested) set_allocations_for_no_gc(); } done: if ((current_no_gc_region_info.start_status == start_no_gc_success) && !no_gc_requested) return TRUE; else { // We are done with starting the no_gc_region. 
current_no_gc_region_info.started = TRUE; return FALSE; } } end_no_gc_region_status gc_heap::end_no_gc_region() { dprintf (1, ("end no gc called")); end_no_gc_region_status status = end_no_gc_success; if (!(current_no_gc_region_info.started)) status = end_no_gc_not_in_progress; if (current_no_gc_region_info.num_gcs_induced) status = end_no_gc_induced; else if (current_no_gc_region_info.num_gcs) status = end_no_gc_alloc_exceeded; if (settings.pause_mode == pause_no_gc) restore_data_for_no_gc(); // sets current_no_gc_region_info.started to FALSE here. memset (&current_no_gc_region_info, 0, sizeof (current_no_gc_region_info)); return status; } //update counters void gc_heap::update_collection_counts () { dynamic_data* dd0 = dynamic_data_of (0); dd_gc_clock (dd0) += 1; uint64_t now = GetHighPrecisionTimeStamp(); for (int i = 0; i <= settings.condemned_generation;i++) { dynamic_data* dd = dynamic_data_of (i); dd_collection_count (dd)++; //this is needed by the linear allocation model if (i == max_generation) { dd_collection_count (dynamic_data_of (loh_generation))++; dd_collection_count(dynamic_data_of(poh_generation))++; } dd_gc_clock (dd) = dd_gc_clock (dd0); dd_previous_time_clock (dd) = dd_time_clock (dd); dd_time_clock (dd) = now; } } #ifdef USE_REGIONS bool gc_heap::extend_soh_for_no_gc() { size_t required = soh_allocation_no_gc; heap_segment* region = ephemeral_heap_segment; while (true) { uint8_t* allocated = (region == ephemeral_heap_segment) ? alloc_allocated : heap_segment_allocated (region); size_t available = heap_segment_reserved (region) - allocated; size_t commit = min (available, required); if (grow_heap_segment (region, allocated + commit)) { required -= commit; if (required == 0) { break; } region = heap_segment_next (region); if (region == nullptr) { region = get_new_region (0); if (region == nullptr) { break; } else { GCToEEInterface::DiagAddNewRegion( 0, heap_segment_mem (region), heap_segment_allocated (region), heap_segment_reserved (region) ); } } } else { break; } } return (required == 0); } #else BOOL gc_heap::expand_soh_with_minimal_gc() { if ((size_t)(heap_segment_reserved (ephemeral_heap_segment) - heap_segment_allocated (ephemeral_heap_segment)) >= soh_allocation_no_gc) return TRUE; heap_segment* new_seg = soh_get_segment_to_expand(); if (new_seg) { if (g_gc_card_table != card_table) copy_brick_card_table(); settings.promotion = TRUE; settings.demotion = FALSE; ephemeral_promotion = TRUE; int condemned_gen_number = max_generation - 1; int align_const = get_alignment_constant (TRUE); for (int i = 0; i <= condemned_gen_number; i++) { generation* gen = generation_of (i); saved_ephemeral_plan_start[i] = generation_allocation_start (gen); saved_ephemeral_plan_start_size[i] = Align (size (generation_allocation_start (gen)), align_const); } // We do need to clear the bricks here as we are converting a bunch of ephemeral objects to gen2 // and need to make sure that there are no left over bricks from the previous GCs for the space // we just used for gen0 allocation. We will need to go through the bricks for these objects for // ephemeral GCs later. 
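// The loop below walks every brick from the gen0 allocation start up to the old ephemeral allocated limit and clears its entry.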
for (size_t b = brick_of (generation_allocation_start (generation_of (0))); b < brick_of (align_on_brick (heap_segment_allocated (ephemeral_heap_segment))); b++) { set_brick (b, -1); } size_t ephemeral_size = (heap_segment_allocated (ephemeral_heap_segment) - generation_allocation_start (generation_of (max_generation - 1))); heap_segment_next (ephemeral_heap_segment) = new_seg; ephemeral_heap_segment = new_seg; uint8_t* start = heap_segment_mem (ephemeral_heap_segment); for (int i = condemned_gen_number; i >= 0; i--) { size_t gen_start_size = Align (min_obj_size); make_generation (i, ephemeral_heap_segment, start); generation* gen = generation_of (i); generation_plan_allocation_start (gen) = start; generation_plan_allocation_start_size (gen) = gen_start_size; start += gen_start_size; } heap_segment_used (ephemeral_heap_segment) = start - plug_skew; heap_segment_plan_allocated (ephemeral_heap_segment) = start; fix_generation_bounds (condemned_gen_number, generation_of (0)); dd_gc_new_allocation (dynamic_data_of (max_generation)) -= ephemeral_size; dd_new_allocation (dynamic_data_of (max_generation)) = dd_gc_new_allocation (dynamic_data_of (max_generation)); adjust_ephemeral_limits(); return TRUE; } else { return FALSE; } } #endif //USE_REGIONS // Only to be done on the thread that calls restart in a join for server GC // and reset the oom status per heap. void gc_heap::check_and_set_no_gc_oom() { #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { gc_heap* hp = g_heaps[i]; if (hp->no_gc_oom_p) { current_no_gc_region_info.start_status = start_no_gc_no_memory; hp->no_gc_oom_p = false; } } #else if (no_gc_oom_p) { current_no_gc_region_info.start_status = start_no_gc_no_memory; no_gc_oom_p = false; } #endif //MULTIPLE_HEAPS } void gc_heap::allocate_for_no_gc_after_gc() { if (current_no_gc_region_info.minimal_gc_p) repair_allocation_contexts (TRUE); no_gc_oom_p = false; if (current_no_gc_region_info.start_status != start_no_gc_no_memory) { if (current_no_gc_region_info.soh_allocation_size != 0) { #ifdef USE_REGIONS no_gc_oom_p = !extend_soh_for_no_gc(); #else if (((size_t)(heap_segment_reserved (ephemeral_heap_segment) - heap_segment_allocated (ephemeral_heap_segment)) < soh_allocation_no_gc) || (!grow_heap_segment (ephemeral_heap_segment, (heap_segment_allocated (ephemeral_heap_segment) + soh_allocation_no_gc)))) { no_gc_oom_p = true; } #endif //USE_REGIONS #ifdef MULTIPLE_HEAPS gc_t_join.join(this, gc_join_after_commit_soh_no_gc); if (gc_t_join.joined()) #endif //MULTIPLE_HEAPS { check_and_set_no_gc_oom(); #ifdef MULTIPLE_HEAPS gc_t_join.restart(); #endif //MULTIPLE_HEAPS } } if ((current_no_gc_region_info.start_status == start_no_gc_success) && !(current_no_gc_region_info.minimal_gc_p) && (current_no_gc_region_info.loh_allocation_size != 0)) { gc_policy = policy_compact; saved_loh_segment_no_gc = 0; if (!find_loh_free_for_no_gc()) { heap_segment* seg = generation_allocation_segment (generation_of (loh_generation)); BOOL found_seg_p = FALSE; while (seg) { if ((size_t)(heap_segment_reserved (seg) - heap_segment_allocated (seg)) >= loh_allocation_no_gc) { found_seg_p = TRUE; if (!commit_loh_for_no_gc (seg)) { no_gc_oom_p = true; break; } } seg = heap_segment_next (seg); } if (!found_seg_p) gc_policy = policy_expand; } #ifdef MULTIPLE_HEAPS gc_t_join.join(this, gc_join_expand_loh_no_gc); if (gc_t_join.joined()) { check_and_set_no_gc_oom(); if (current_no_gc_region_info.start_status == start_no_gc_success) { for (int i = 0; i < n_heaps; i++) { gc_heap* hp = g_heaps[i]; if (hp->gc_policy == 
policy_expand) { hp->saved_loh_segment_no_gc = get_segment_for_uoh (loh_generation, get_uoh_seg_size (loh_allocation_no_gc), hp); if (!(hp->saved_loh_segment_no_gc)) { current_no_gc_region_info.start_status = start_no_gc_no_memory; break; } } } } gc_t_join.restart(); } #else //MULTIPLE_HEAPS check_and_set_no_gc_oom(); if ((current_no_gc_region_info.start_status == start_no_gc_success) && (gc_policy == policy_expand)) { saved_loh_segment_no_gc = get_segment_for_uoh (loh_generation, get_uoh_seg_size (loh_allocation_no_gc)); if (!saved_loh_segment_no_gc) current_no_gc_region_info.start_status = start_no_gc_no_memory; } #endif //MULTIPLE_HEAPS if ((current_no_gc_region_info.start_status == start_no_gc_success) && saved_loh_segment_no_gc) { if (!commit_loh_for_no_gc (saved_loh_segment_no_gc)) { no_gc_oom_p = true; } } } } #ifdef MULTIPLE_HEAPS gc_t_join.join(this, gc_join_final_no_gc); if (gc_t_join.joined()) #endif //MULTIPLE_HEAPS { check_and_set_no_gc_oom(); if (current_no_gc_region_info.start_status == start_no_gc_success) { set_allocations_for_no_gc(); current_no_gc_region_info.started = TRUE; } #ifdef MULTIPLE_HEAPS gc_t_join.restart(); #endif //MULTIPLE_HEAPS } } void gc_heap::init_records() { // An option is to move this to be after we figure out which gen to condemn so we don't // need to clear some generations' data 'cause we know they don't change, but that also means // we can't simply call memset here. memset (&gc_data_per_heap, 0, sizeof (gc_data_per_heap)); gc_data_per_heap.heap_index = heap_number; if (heap_number == 0) memset (&gc_data_global, 0, sizeof (gc_data_global)); #ifdef GC_CONFIG_DRIVEN memset (interesting_data_per_gc, 0, sizeof (interesting_data_per_gc)); #endif //GC_CONFIG_DRIVEN memset (&fgm_result, 0, sizeof (fgm_result)); for (int i = 0; i < total_generation_count; i++) { gc_data_per_heap.gen_data[i].size_before = generation_size (i); generation* gen = generation_of (i); gc_data_per_heap.gen_data[i].free_list_space_before = generation_free_list_space (gen); gc_data_per_heap.gen_data[i].free_obj_space_before = generation_free_obj_space (gen); } #ifdef USE_REGIONS end_gen0_region_space = 0; gen0_pinned_free_space = 0; gen0_large_chunk_found = false; num_regions_freed_in_sweep = 0; #endif //USE_REGIONS sufficient_gen0_space_p = FALSE; #ifdef MULTIPLE_HEAPS gen0_allocated_after_gc_p = false; #endif //MULTIPLE_HEAPS #if defined (_DEBUG) && defined (VERIFY_HEAP) verify_pinned_queue_p = FALSE; #endif // _DEBUG && VERIFY_HEAP } void gc_heap::pm_full_gc_init_or_clear() { // This means the next GC will be a full blocking GC and we need to init. if (settings.condemned_generation == (max_generation - 1)) { if (pm_trigger_full_gc) { #ifdef MULTIPLE_HEAPS do_post_gc(); #endif //MULTIPLE_HEAPS dprintf (GTC_LOG, ("init for PM triggered full GC")); uint32_t saved_entry_memory_load = settings.entry_memory_load; settings.init_mechanisms(); settings.reason = reason_pm_full_gc; settings.condemned_generation = max_generation; settings.entry_memory_load = saved_entry_memory_load; // Can't assert this since we only check at the end of gen2 GCs, // during gen1 the memory load could have already dropped. // Although arguably we should just turn off PM then... //assert (settings.entry_memory_load >= high_memory_load_th); assert (settings.entry_memory_load > 0); settings.gc_index += 1; do_pre_gc(); } } // This means we are in the progress of a full blocking GC triggered by // this PM mode. 
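// Reaching the branch below means the PM-triggered full GC has just completed, so pm_trigger_full_gc is cleared.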
else if (settings.reason == reason_pm_full_gc) { assert (settings.condemned_generation == max_generation); assert (pm_trigger_full_gc); pm_trigger_full_gc = false; dprintf (GTC_LOG, ("PM triggered full GC done")); } } void gc_heap::garbage_collect_pm_full_gc() { assert (settings.condemned_generation == max_generation); assert (settings.reason == reason_pm_full_gc); assert (!settings.concurrent); gc1(); } void gc_heap::garbage_collect (int n) { //reset the number of alloc contexts alloc_contexts_used = 0; fix_allocation_contexts (TRUE); #ifdef MULTIPLE_HEAPS #ifdef JOIN_STATS gc_t_join.start_ts(this); #endif //JOIN_STATS check_gen0_bricks(); clear_gen0_bricks(); #endif //MULTIPLE_HEAPS if ((settings.pause_mode == pause_no_gc) && current_no_gc_region_info.minimal_gc_p) { #ifdef MULTIPLE_HEAPS gc_t_join.join(this, gc_join_minimal_gc); if (gc_t_join.joined()) #endif //MULTIPLE_HEAPS { #ifndef USE_REGIONS #ifdef MULTIPLE_HEAPS // this is serialized because we need to get a segment for (int i = 0; i < n_heaps; i++) { if (!(g_heaps[i]->expand_soh_with_minimal_gc())) current_no_gc_region_info.start_status = start_no_gc_no_memory; } #else if (!expand_soh_with_minimal_gc()) current_no_gc_region_info.start_status = start_no_gc_no_memory; #endif //MULTIPLE_HEAPS #endif //!USE_REGIONS update_collection_counts_for_no_gc(); #ifdef MULTIPLE_HEAPS gc_start_event.Reset(); gc_t_join.restart(); #endif //MULTIPLE_HEAPS } goto done; } init_records(); settings.reason = gc_trigger_reason; num_pinned_objects = 0; #ifdef STRESS_HEAP if (settings.reason == reason_gcstress) { settings.reason = reason_induced; settings.stress_induced = TRUE; } #endif // STRESS_HEAP #ifdef MULTIPLE_HEAPS //align all heaps on the max generation to condemn dprintf (3, ("Joining for max generation to condemn")); condemned_generation_num = generation_to_condemn (n, &blocking_collection, &elevation_requested, FALSE); gc_t_join.join(this, gc_join_generation_determined); if (gc_t_join.joined()) #endif //MULTIPLE_HEAPS { #ifdef FEATURE_BASICFREEZE seg_table->delete_old_slots(); #endif //FEATURE_BASICFREEZE #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { gc_heap* hp = g_heaps[i]; // check for card table growth if (g_gc_card_table != hp->card_table) hp->copy_brick_card_table(); hp->rearrange_uoh_segments(); #ifdef BACKGROUND_GC hp->background_delay_delete_uoh_segments(); if (!gc_heap::background_running_p()) hp->rearrange_small_heap_segments(); #endif //BACKGROUND_GC } #else //MULTIPLE_HEAPS if (g_gc_card_table != card_table) copy_brick_card_table(); rearrange_uoh_segments(); #ifdef BACKGROUND_GC background_delay_delete_uoh_segments(); if (!gc_heap::background_running_p()) rearrange_small_heap_segments(); #endif //BACKGROUND_GC #endif //MULTIPLE_HEAPS BOOL should_evaluate_elevation = TRUE; BOOL should_do_blocking_collection = FALSE; #ifdef MULTIPLE_HEAPS int gen_max = condemned_generation_num; for (int i = 0; i < n_heaps; i++) { if (gen_max < g_heaps[i]->condemned_generation_num) gen_max = g_heaps[i]->condemned_generation_num; if (should_evaluate_elevation && !(g_heaps[i]->elevation_requested)) should_evaluate_elevation = FALSE; if ((!should_do_blocking_collection) && (g_heaps[i]->blocking_collection)) should_do_blocking_collection = TRUE; } settings.condemned_generation = gen_max; #else //MULTIPLE_HEAPS settings.condemned_generation = generation_to_condemn (n, &blocking_collection, &elevation_requested, FALSE); should_evaluate_elevation = elevation_requested; should_do_blocking_collection = blocking_collection; #endif //MULTIPLE_HEAPS 
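// Illustrative example (hypothetical numbers): in the server GC case with n_heaps = 4 and per-heap
// proposals condemned_generation_num = {1, 0, 2, 1}, gen_max above is 2, so every heap starts from a
// joined proposal of gen2; joined_generation_to_condemn below can still adjust that joined decision
// (typically elevating it) based on the elevation and blocking-collection requests gathered above.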
settings.condemned_generation = joined_generation_to_condemn ( should_evaluate_elevation, n, settings.condemned_generation, &should_do_blocking_collection STRESS_HEAP_ARG(n) ); STRESS_LOG1(LF_GCROOTS|LF_GC|LF_GCALLOC, LL_INFO10, "condemned generation num: %d\n", settings.condemned_generation); record_gcs_during_no_gc(); if (settings.condemned_generation > 1) settings.promotion = TRUE; #ifdef HEAP_ANALYZE // At this point we've decided what generation is condemned // See if we've been requested to analyze survivors after the mark phase if (GCToEEInterface::AnalyzeSurvivorsRequested(settings.condemned_generation)) { heap_analyze_enabled = TRUE; } #endif // HEAP_ANALYZE GCToEEInterface::DiagGCStart(settings.condemned_generation, settings.reason == reason_induced); #ifdef BACKGROUND_GC if ((settings.condemned_generation == max_generation) && (should_do_blocking_collection == FALSE) && gc_can_use_concurrent && !temp_disable_concurrent_p && ((settings.pause_mode == pause_interactive) || (settings.pause_mode == pause_sustained_low_latency))) { keep_bgc_threads_p = TRUE; c_write (settings.concurrent, TRUE); memset (&bgc_data_global, 0, sizeof(bgc_data_global)); memcpy (&bgc_data_global, &gc_data_global, sizeof(gc_data_global)); } #endif //BACKGROUND_GC settings.gc_index = (uint32_t)dd_collection_count (dynamic_data_of (0)) + 1; #ifdef MULTIPLE_HEAPS hb_log_balance_activities(); hb_log_new_allocation(); #endif //MULTIPLE_HEAPS // Call the EE for start of GC work GCToEEInterface::GcStartWork (settings.condemned_generation, max_generation); // TODO: we could fire an ETW event to say this GC as a concurrent GC but later on due to not being able to // create threads or whatever, this could be a non concurrent GC. Maybe for concurrent GC we should fire // it in do_background_gc and if it failed to be a CGC we fire it in gc1... in other words, this should be // fired in gc1. do_pre_gc(); #ifdef MULTIPLE_HEAPS gc_start_event.Reset(); dprintf(3, ("Starting all gc threads for gc")); gc_t_join.restart(); #endif //MULTIPLE_HEAPS } descr_generations ("BEGIN"); #if defined(TRACE_GC) && defined(USE_REGIONS) if (heap_number == 0) { #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { gc_heap *hp = g_heaps[i]; #else //MULTIPLE_HEAPS { gc_heap* hp = pGenGCHeap; const int i = 0; #endif //MULTIPLE_HEAPS if (settings.condemned_generation == max_generation) { // print all kinds of free regions region_free_list::print(hp->free_regions, i, "BEGIN"); } else { // print only basic free regions hp->free_regions[basic_free_region].print (i, "BEGIN"); } } } #endif // TRACE_GC && USE_REGIONS #ifdef VERIFY_HEAP if ((GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) && !(GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_POST_GC_ONLY)) { verify_heap (TRUE); } if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_BARRIERCHECK) checkGCWriteBarrier(); #endif // VERIFY_HEAP #ifdef BACKGROUND_GC if (settings.concurrent) { // We need to save the settings because we'll need to restore it after each FGC. 
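// (The shared settings state is reused by any foreground ephemeral GC that runs while this background
// GC is in flight; saved_bgc_settings, captured just below, is what gets copied back via
// "settings = saved_bgc_settings" before the background GC proper is kicked off.)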
assert (settings.condemned_generation == max_generation); settings.compaction = FALSE; saved_bgc_settings = settings; #ifdef MULTIPLE_HEAPS if (heap_number == 0) { for (int i = 0; i < n_heaps; i++) { prepare_bgc_thread (g_heaps[i]); } dprintf (2, ("setting bgc_threads_sync_event")); bgc_threads_sync_event.Set(); } else { bgc_threads_sync_event.Wait(INFINITE, FALSE); dprintf (2, ("bgc_threads_sync_event is signalled")); } #else prepare_bgc_thread(0); #endif //MULTIPLE_HEAPS #ifdef MULTIPLE_HEAPS gc_t_join.join(this, gc_join_start_bgc); if (gc_t_join.joined()) #endif //MULTIPLE_HEAPS { do_concurrent_p = TRUE; do_ephemeral_gc_p = FALSE; #ifdef MULTIPLE_HEAPS dprintf(2, ("Joined to perform a background GC")); for (int i = 0; i < n_heaps; i++) { gc_heap* hp = g_heaps[i]; if (!(hp->bgc_thread) || !hp->commit_mark_array_bgc_init()) { do_concurrent_p = FALSE; break; } else { hp->background_saved_lowest_address = hp->lowest_address; hp->background_saved_highest_address = hp->highest_address; } } #else do_concurrent_p = (!!bgc_thread && commit_mark_array_bgc_init()); if (do_concurrent_p) { background_saved_lowest_address = lowest_address; background_saved_highest_address = highest_address; } #endif //MULTIPLE_HEAPS if (do_concurrent_p) { #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP SoftwareWriteWatch::EnableForGCHeap(); #endif //FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) g_heaps[i]->current_bgc_state = bgc_initialized; #else current_bgc_state = bgc_initialized; #endif //MULTIPLE_HEAPS int gen = check_for_ephemeral_alloc(); // always do a gen1 GC before we start BGC. dont_restart_ee_p = TRUE; if (gen == -1) { // If we decide to not do a GC before the BGC we need to // restore the gen0 alloc context. #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { generation_allocation_pointer (g_heaps[i]->generation_of (0)) = 0; generation_allocation_limit (g_heaps[i]->generation_of (0)) = 0; } #else generation_allocation_pointer (youngest_generation) = 0; generation_allocation_limit (youngest_generation) = 0; #endif //MULTIPLE_HEAPS } else { do_ephemeral_gc_p = TRUE; settings.init_mechanisms(); settings.condemned_generation = gen; settings.gc_index = (size_t)dd_collection_count (dynamic_data_of (0)) + 2; do_pre_gc(); // TODO BACKGROUND_GC need to add the profiling stuff here. dprintf (GTC_LOG, ("doing gen%d before doing a bgc", gen)); } //clear the cards so they don't bleed in gen 1 during collection // shouldn't this always be done at the beginning of any GC? //clear_card_for_addresses ( // generation_allocation_start (generation_of (0)), // heap_segment_allocated (ephemeral_heap_segment)); if (!do_ephemeral_gc_p) { do_background_gc(); } } else { settings.compaction = TRUE; c_write (settings.concurrent, FALSE); } #ifdef MULTIPLE_HEAPS gc_t_join.restart(); #endif //MULTIPLE_HEAPS } if (do_concurrent_p) { // At this point we are sure we'll be starting a BGC, so save its per heap data here. // global data is only calculated at the end of the GC so we don't need to worry about // FGCs overwriting it. 
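// (bgc_data_per_heap below therefore starts out as a snapshot of the per-heap data recorded so far
// for this GC; foreground GCs that run while the BGC is in progress keep rewriting gc_data_per_heap
// without disturbing this copy.)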
memset (&bgc_data_per_heap, 0, sizeof (bgc_data_per_heap)); memcpy (&bgc_data_per_heap, &gc_data_per_heap, sizeof(gc_data_per_heap)); if (do_ephemeral_gc_p) { dprintf (2, ("GC threads running, doing gen%d GC", settings.condemned_generation)); gen_to_condemn_reasons.init(); gen_to_condemn_reasons.set_condition (gen_before_bgc); gc_data_per_heap.gen_to_condemn_reasons.init (&gen_to_condemn_reasons); gc1(); #ifdef MULTIPLE_HEAPS gc_t_join.join(this, gc_join_bgc_after_ephemeral); if (gc_t_join.joined()) #endif //MULTIPLE_HEAPS { #ifdef MULTIPLE_HEAPS do_post_gc(); #endif //MULTIPLE_HEAPS settings = saved_bgc_settings; assert (settings.concurrent); do_background_gc(); #ifdef MULTIPLE_HEAPS gc_t_join.restart(); #endif //MULTIPLE_HEAPS } } } else { dprintf (2, ("couldn't create BGC threads, reverting to doing a blocking GC")); gc1(); } } else #endif //BACKGROUND_GC { gc1(); } #ifndef MULTIPLE_HEAPS allocation_running_time = (size_t)GCToOSInterface::GetLowPrecisionTimeStamp(); allocation_running_amount = dd_new_allocation (dynamic_data_of (0)); fgn_last_alloc = dd_new_allocation (dynamic_data_of (0)); #endif //MULTIPLE_HEAPS done: if (settings.pause_mode == pause_no_gc) allocate_for_no_gc_after_gc(); } #define mark_stack_empty_p() (mark_stack_base == mark_stack_tos) inline size_t gc_heap::get_promoted_bytes() { #ifdef USE_REGIONS if (!survived_per_region) { dprintf (REGIONS_LOG, ("no space to store promoted bytes")); return 0; } dprintf (3, ("h%d getting surv", heap_number)); size_t promoted = 0; for (size_t i = 0; i < region_count; i++) { if (survived_per_region[i] > 0) { heap_segment* region = get_region_at_index (i); dprintf (REGIONS_LOG, ("h%d region[%d] %Ix(g%d)(%s) surv: %Id(%Ix)", heap_number, i, heap_segment_mem (region), heap_segment_gen_num (region), (heap_segment_loh_p (region) ? "LOH" : (heap_segment_poh_p (region) ? "POH" :"SOH")), survived_per_region[i], &survived_per_region[i])); promoted += survived_per_region[i]; } } #ifdef _DEBUG dprintf (REGIONS_LOG, ("h%d global recorded %Id, regions recorded %Id", heap_number, promoted_bytes (heap_number), promoted)); assert (promoted_bytes (heap_number) == promoted); #endif //_DEBUG return promoted; #else //USE_REGIONS #ifdef MULTIPLE_HEAPS return g_promoted [heap_number*16]; #else //MULTIPLE_HEAPS return g_promoted; #endif //MULTIPLE_HEAPS #endif //USE_REGIONS } #ifdef USE_REGIONS void gc_heap::sync_promoted_bytes() { int condemned_gen_number = settings.condemned_generation; int highest_gen_number = ((condemned_gen_number == max_generation) ? (total_generation_count - 1) : settings.condemned_generation); int stop_gen_idx = get_stop_generation_index (condemned_gen_number); #ifdef MULTIPLE_HEAPS // We gather all the promoted bytes for a region recorded by all threads into that region's survived // for plan phase. sore_mark_list will be called shortly and will start using the same storage that // the GC threads used to record promoted bytes. 
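// Illustrative example (hypothetical numbers): if heaps 0..2 recorded survived_per_region[k] of
// {12KB, 0, 4KB} for some region k during marking, heap_segment_survived for that region becomes
// 16KB below; old_card_survived_per_region is folded into heap_segment_old_card_survived the same way.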
for (int i = 0; i < n_heaps; i++) { gc_heap* hp = g_heaps[i]; #else //MULTIPLE_HEAPS { gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS for (int gen_idx = highest_gen_number; gen_idx >= stop_gen_idx; gen_idx--) { generation* condemned_gen = hp->generation_of (gen_idx); heap_segment* current_region = heap_segment_rw (generation_start_segment (condemned_gen)); while (current_region) { size_t region_index = get_basic_region_index_for_address (heap_segment_mem (current_region)); #ifdef MULTIPLE_HEAPS size_t total_surv = 0; size_t total_old_card_surv = 0; for (int hp_idx = 0; hp_idx < n_heaps; hp_idx++) { total_surv += g_heaps[hp_idx]->survived_per_region[region_index]; total_old_card_surv += g_heaps[hp_idx]->old_card_survived_per_region[region_index]; } heap_segment_survived (current_region) = (int)total_surv; heap_segment_old_card_survived (current_region) = (int)total_old_card_surv; #else heap_segment_survived (current_region) = (int)(survived_per_region[region_index]); heap_segment_old_card_survived (current_region) = (int)(old_card_survived_per_region[region_index]); #endif //MULTIPLE_HEAPS dprintf (REGIONS_LOG, ("region #%d %Ix surv %Id, old card surv %Id", region_index, heap_segment_mem (current_region), heap_segment_survived (current_region), heap_segment_old_card_survived (current_region))); current_region = heap_segment_next (current_region); } } } } #ifdef MULTIPLE_HEAPS void gc_heap::set_heap_for_contained_basic_regions (heap_segment* region, gc_heap* hp) { uint8_t* region_start = get_region_start (region); uint8_t* region_end = heap_segment_reserved (region); int num_basic_regions = (int)((region_end - region_start) >> min_segment_size_shr); for (int i = 0; i < num_basic_regions; i++) { uint8_t* basic_region_start = region_start + ((size_t)i << min_segment_size_shr); heap_segment* basic_region = get_region_info (basic_region_start); heap_segment_heap (basic_region) = hp; } } heap_segment* gc_heap::unlink_first_rw_region (int gen_idx) { generation* gen = generation_of (gen_idx); heap_segment* prev_region = generation_tail_ro_region (gen); heap_segment* region = nullptr; if (prev_region) { assert (heap_segment_read_only_p (prev_region)); region = heap_segment_next (prev_region); assert (region != nullptr); // don't remove the last region in the generation if (heap_segment_next (region) == nullptr) { assert (region == generation_tail_region (gen)); return nullptr; } heap_segment_next (prev_region) = heap_segment_next (region); } else { region = generation_start_segment (gen); assert (region != nullptr); // don't remove the last region in the generation if (heap_segment_next (region) == nullptr) { assert (region == generation_tail_region (gen)); return nullptr; } generation_start_segment (gen) = heap_segment_next (region); } assert (region != generation_tail_region (gen)); assert (!heap_segment_read_only_p (region)); dprintf (REGIONS_LOG, ("unlink_first_rw_region on heap: %d gen: %d region: %Ix", heap_number, gen_idx, heap_segment_mem (region))); set_heap_for_contained_basic_regions (region, nullptr); return region; } void gc_heap::thread_rw_region_front (int gen_idx, heap_segment* region) { generation* gen = generation_of (gen_idx); assert (!heap_segment_read_only_p (region)); heap_segment* prev_region = generation_tail_ro_region (gen); if (prev_region) { heap_segment_next (region) = heap_segment_next (prev_region); heap_segment_next (prev_region) = region; } else { heap_segment_next (region) = generation_start_segment (gen); generation_start_segment (gen) = region; } dprintf 
(REGIONS_LOG, ("thread_rw_region_front on heap: %d gen: %d region: %Ix", heap_number, gen_idx, heap_segment_mem (region))); set_heap_for_contained_basic_regions (region, this); } #endif // MULTIPLE_HEAPS void gc_heap::equalize_promoted_bytes() { #ifdef MULTIPLE_HEAPS // algorithm to roughly balance promoted bytes across heaps by moving regions between heaps // goal is just to balance roughly, while keeping computational complexity low // hope is to achieve better work balancing in relocate and compact phases // int condemned_gen_number = settings.condemned_generation; int highest_gen_number = ((condemned_gen_number == max_generation) ? (total_generation_count - 1) : condemned_gen_number); int stop_gen_idx = get_stop_generation_index (condemned_gen_number); for (int gen_idx = highest_gen_number; gen_idx >= stop_gen_idx; gen_idx--) { // step 1: // compute total promoted bytes per gen size_t total_surv = 0; size_t max_surv_per_heap = 0; size_t surv_per_heap[MAX_SUPPORTED_CPUS]; for (int i = 0; i < n_heaps; i++) { surv_per_heap[i] = 0; gc_heap* hp = g_heaps[i]; generation* condemned_gen = hp->generation_of (gen_idx); heap_segment* current_region = heap_segment_rw (generation_start_segment (condemned_gen)); while (current_region) { total_surv += heap_segment_survived (current_region); surv_per_heap[i] += heap_segment_survived (current_region); current_region = heap_segment_next (current_region); } max_surv_per_heap = max (max_surv_per_heap, surv_per_heap[i]); dprintf (REGIONS_LOG, ("gen: %d heap %d surv: %Id", gen_idx, i, surv_per_heap[i])); } // compute average promoted bytes per heap and per gen // be careful to round up size_t avg_surv_per_heap = (total_surv + n_heaps - 1) / n_heaps; if (avg_surv_per_heap != 0) { dprintf (REGIONS_LOG, ("before equalize: gen: %d avg surv: %Id max_surv: %Id imbalance: %d", gen_idx, avg_surv_per_heap, max_surv_per_heap, max_surv_per_heap*100/avg_surv_per_heap)); } // // step 2: // remove regions from surplus heaps until all heaps are <= average // put removed regions into surplus regions // // step 3: // put regions into size classes by survivorship // put deficit heaps into size classes by deficit // // step 4: // while (surplus regions is non-empty) // get surplus region from biggest size class // put it into heap from biggest deficit size class // re-insert heap by resulting deficit size class heap_segment* surplus_regions = nullptr; size_t max_deficit = 0; size_t max_survived = 0; // go through all the heaps for (int i = 0; i < n_heaps; i++) { // remove regions from this heap until it has average or less survivorship while (surv_per_heap[i] > avg_surv_per_heap) { heap_segment* region = g_heaps[i]->unlink_first_rw_region (gen_idx); if (region == nullptr) { break; } assert (surv_per_heap[i] >= (size_t)heap_segment_survived (region)); dprintf (REGIONS_LOG, ("heap: %d surv: %Id - %Id = %Id", i, surv_per_heap[i], heap_segment_survived (region), surv_per_heap[i] - heap_segment_survived (region))); surv_per_heap[i] -= heap_segment_survived (region); heap_segment_next (region) = surplus_regions; surplus_regions = region; max_survived = max (max_survived, (size_t)heap_segment_survived (region)); } if (surv_per_heap[i] < avg_surv_per_heap) { size_t deficit = avg_surv_per_heap - surv_per_heap[i]; max_deficit = max (max_deficit, deficit); } } // we arrange both surplus regions and deficit heaps by size classes const int NUM_SIZE_CLASSES = 16; heap_segment* surplus_regions_by_size_class[NUM_SIZE_CLASSES]; memset (surplus_regions_by_size_class, 0, 
sizeof(surplus_regions_by_size_class)); double survived_scale_factor = ((double)NUM_SIZE_CLASSES) / (max_survived + 1); heap_segment* next_region; for (heap_segment* region = surplus_regions; region != nullptr; region = next_region) { int size_class = (int)(heap_segment_survived (region)*survived_scale_factor); assert ((0 <= size_class) && (size_class < NUM_SIZE_CLASSES)); next_region = heap_segment_next (region); heap_segment_next (region) = surplus_regions_by_size_class[size_class]; surplus_regions_by_size_class[size_class] = region; } int next_heap_in_size_class[MAX_SUPPORTED_CPUS]; int heaps_by_deficit_size_class[NUM_SIZE_CLASSES]; for (int i = 0; i < NUM_SIZE_CLASSES; i++) { heaps_by_deficit_size_class[i] = -1; } double deficit_scale_factor = ((double)NUM_SIZE_CLASSES) / (max_deficit + 1); for (int i = 0; i < n_heaps; i++) { if (avg_surv_per_heap > surv_per_heap[i]) { size_t deficit = avg_surv_per_heap - surv_per_heap[i]; int size_class = (int)(deficit*deficit_scale_factor); assert ((0 <= size_class) && (size_class < NUM_SIZE_CLASSES)); next_heap_in_size_class[i] = heaps_by_deficit_size_class[size_class]; heaps_by_deficit_size_class[size_class] = i; } } int region_size_class = NUM_SIZE_CLASSES - 1; int heap_size_class = NUM_SIZE_CLASSES - 1; while (region_size_class >= 0) { // obtain a region from the biggest size class heap_segment* region = surplus_regions_by_size_class[region_size_class]; if (region == nullptr) { region_size_class--; continue; } // and a heap from the biggest deficit size class int heap_num; while (true) { if (heap_size_class < 0) { // put any remaining regions on heap 0 // rare case, but there may be some 0 surv size regions heap_num = 0; break; } heap_num = heaps_by_deficit_size_class[heap_size_class]; if (heap_num >= 0) { break; } heap_size_class--; } // now move the region to the heap surplus_regions_by_size_class[region_size_class] = heap_segment_next (region); g_heaps[heap_num]->thread_rw_region_front (gen_idx, region); // adjust survival for this heap dprintf (REGIONS_LOG, ("heap: %d surv: %Id + %Id = %Id", heap_num, surv_per_heap[heap_num], heap_segment_survived (region), surv_per_heap[heap_num] + heap_segment_survived (region))); surv_per_heap[heap_num] += heap_segment_survived (region); if (heap_size_class < 0) { // no need to update size classes for heaps - // just work down the remaining regions, if any continue; } // is this heap now average or above? 
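// (i.e. did threading the region above push surv_per_heap[heap_num] up to or past avg_surv_per_heap?
// If so, the heap no longer has a deficit and is unlinked from its deficit size class just below.)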
if (surv_per_heap[heap_num] >= avg_surv_per_heap) { // if so, unlink from the current size class heaps_by_deficit_size_class[heap_size_class] = next_heap_in_size_class[heap_num]; continue; } // otherwise compute the updated deficit size_t new_deficit = avg_surv_per_heap - surv_per_heap[heap_num]; // check if this heap moves to a differenct deficit size class int new_heap_size_class = (int)(new_deficit*deficit_scale_factor); if (new_heap_size_class != heap_size_class) { // the new deficit size class should be smaller and in range assert (new_heap_size_class < heap_size_class); assert ((0 <= new_heap_size_class) && (new_heap_size_class < NUM_SIZE_CLASSES)); // if so, unlink from the current size class heaps_by_deficit_size_class[heap_size_class] = next_heap_in_size_class[heap_num]; // and link to the new size class next_heap_in_size_class[heap_num] = heaps_by_deficit_size_class[new_heap_size_class]; heaps_by_deficit_size_class[new_heap_size_class] = heap_num; } } // we will generally be left with some heaps with deficits here, but that's ok // check we didn't screw up the data structures for (int i = 0; i < n_heaps; i++) { g_heaps[i]->verify_regions (gen_idx, false); } #ifdef TRACE_GC max_surv_per_heap = 0; for (int i = 0; i < n_heaps; i++) { max_surv_per_heap = max (max_surv_per_heap, surv_per_heap[i]); } if (avg_surv_per_heap != 0) { dprintf (REGIONS_LOG, ("after equalize: gen: %d avg surv: %Id max_surv: %Id imbalance: %d", gen_idx, avg_surv_per_heap, max_surv_per_heap, max_surv_per_heap*100/avg_surv_per_heap)); } #endif // TRACE_GC } #endif //MULTIPLE_HEAPS } #endif //USE_REGIONS #if !defined(USE_REGIONS) || defined(_DEBUG) inline void gc_heap::init_promoted_bytes() { #ifdef MULTIPLE_HEAPS g_promoted [heap_number*16] = 0; #else //MULTIPLE_HEAPS g_promoted = 0; #endif //MULTIPLE_HEAPS } size_t& gc_heap::promoted_bytes (int thread) { #ifdef MULTIPLE_HEAPS return g_promoted [thread*16]; #else //MULTIPLE_HEAPS UNREFERENCED_PARAMETER(thread); return g_promoted; #endif //MULTIPLE_HEAPS } #endif //!USE_REGIONS || _DEBUG inline void gc_heap::add_to_promoted_bytes (uint8_t* object, int thread) { size_t obj_size = size (object); add_to_promoted_bytes (object, obj_size, thread); } inline void gc_heap::add_to_promoted_bytes (uint8_t* object, size_t obj_size, int thread) { assert (thread == heap_number); #ifdef USE_REGIONS if (survived_per_region) { survived_per_region[get_basic_region_index_for_address (object)] += obj_size; } #endif //USE_REGIONS #if !defined(USE_REGIONS) || defined(_DEBUG) #ifdef MULTIPLE_HEAPS g_promoted [heap_number*16] += obj_size; #else //MULTIPLE_HEAPS g_promoted += obj_size; #endif //MULTIPLE_HEAPS #endif //!USE_REGIONS || _DEBUG #ifdef _DEBUG // Verify we keep the 2 recordings in sync. //get_promoted_bytes(); #endif //_DEBUG } heap_segment* gc_heap::find_segment (uint8_t* interior, BOOL small_segment_only_p) { heap_segment* seg = seg_mapping_table_segment_of (interior); if (seg) { if (small_segment_only_p && heap_segment_uoh_p (seg)) return 0; } return seg; } #if !defined(_DEBUG) && !defined(__GNUC__) inline // This causes link errors if global optimization is off #endif //!_DEBUG && !__GNUC__ gc_heap* gc_heap::heap_of (uint8_t* o) { #ifdef MULTIPLE_HEAPS if (o == 0) return g_heaps [0]; gc_heap* hp = seg_mapping_table_heap_of (o); return (hp ? 
hp : g_heaps[0]); #else //MULTIPLE_HEAPS UNREFERENCED_PARAMETER(o); return __this; #endif //MULTIPLE_HEAPS } inline gc_heap* gc_heap::heap_of_gc (uint8_t* o) { #ifdef MULTIPLE_HEAPS if (o == 0) return g_heaps [0]; gc_heap* hp = seg_mapping_table_heap_of_gc (o); return (hp ? hp : g_heaps[0]); #else //MULTIPLE_HEAPS UNREFERENCED_PARAMETER(o); return __this; #endif //MULTIPLE_HEAPS } // will find all heap objects (large and small) // // Callers of this method need to guarantee the interior pointer is within the heap range. // // If you need it to be stricter, eg if you only want to find an object in ephemeral range, // you should make sure interior is within that range before calling this method. uint8_t* gc_heap::find_object (uint8_t* interior) { assert (interior != 0); if (!gen0_bricks_cleared) { #ifdef MULTIPLE_HEAPS assert (!"Should have already been done in server GC"); #endif //MULTIPLE_HEAPS clear_gen0_bricks(); } //indicate that in the future this needs to be done during allocation gen0_must_clear_bricks = FFIND_DECAY; int brick_entry = get_brick_entry(brick_of (interior)); if (brick_entry == 0) { // this is a pointer to a UOH object heap_segment* seg = find_segment (interior, FALSE); if (seg) { #ifdef FEATURE_CONSERVATIVE_GC if (interior >= heap_segment_allocated(seg)) return 0; #endif // If interior falls within the first free object at the beginning of a generation, // we don't have brick entry for it, and we may incorrectly treat it as on large object heap. int align_const = get_alignment_constant (heap_segment_read_only_p (seg) #ifdef FEATURE_CONSERVATIVE_GC || (GCConfig::GetConservativeGC() && !heap_segment_uoh_p (seg)) #endif ); assert (interior < heap_segment_allocated (seg)); uint8_t* o = heap_segment_mem (seg); while (o < heap_segment_allocated (seg)) { uint8_t* next_o = o + Align (size (o), align_const); assert (next_o > o); if ((o <= interior) && (interior < next_o)) return o; o = next_o; } return 0; } else { return 0; } } else { heap_segment* seg = find_segment (interior, TRUE); if (seg) { #ifdef FEATURE_CONSERVATIVE_GC if (interior >= heap_segment_allocated (seg)) return 0; #else assert (interior < heap_segment_allocated (seg)); #endif uint8_t* o = find_first_object (interior, heap_segment_mem (seg)); return o; } else return 0; } } #ifdef MULTIPLE_HEAPS #ifdef GC_CONFIG_DRIVEN #define m_boundary(o) {if (mark_list_index <= mark_list_end) {*mark_list_index = o;mark_list_index++;} else {mark_list_index++;}} #else //GC_CONFIG_DRIVEN #define m_boundary(o) {if (mark_list_index <= mark_list_end) {*mark_list_index = o;mark_list_index++;}} #endif //GC_CONFIG_DRIVEN #define m_boundary_fullgc(o) {} #else //MULTIPLE_HEAPS #ifdef GC_CONFIG_DRIVEN #define m_boundary(o) {if (mark_list_index <= mark_list_end) {*mark_list_index = o;mark_list_index++;} else {mark_list_index++;} if (slow > o) slow = o; if (shigh < o) shigh = o;} #else #define m_boundary(o) {if (mark_list_index <= mark_list_end) {*mark_list_index = o;mark_list_index++;}if (slow > o) slow = o; if (shigh < o) shigh = o;} #endif //GC_CONFIG_DRIVEN #define m_boundary_fullgc(o) {if (slow > o) slow = o; if (shigh < o) shigh = o;} #endif //MULTIPLE_HEAPS #define method_table(o) ((CObjectHeader*)(o))->GetMethodTable() inline BOOL gc_heap::gc_mark1 (uint8_t* o) { BOOL marked = !marked (o); set_marked (o); dprintf (3, ("*%Ix*, newly marked: %d", (size_t)o, marked)); #if defined(USE_REGIONS) && defined(_DEBUG) heap_segment* seg = seg_mapping_table_segment_of (o); if (o > heap_segment_allocated (seg)) { dprintf (REGIONS_LOG, ("%Ix is 
in seg %Ix(%Ix) but beyond alloc %Ix!!", o, (size_t)seg, heap_segment_mem (seg), heap_segment_allocated (seg))); GCToOSInterface::DebugBreak(); } #endif //USE_REGIONS && _DEBUG return marked; } #ifdef USE_REGIONS inline bool is_in_heap_range (uint8_t* o) { #ifdef FEATURE_BASICFREEZE // we may have frozen objects in read only segments // outside of the reserved address range of the gc heap assert (((g_gc_lowest_address <= o) && (o < g_gc_highest_address)) || (o == nullptr) || (ro_segment_lookup (o) != nullptr)); return ((g_gc_lowest_address <= o) && (o < g_gc_highest_address)); #else //FEATURE_BASICFREEZE // without frozen objects, every non-null pointer must be // within the heap assert ((o == nullptr) || (g_gc_lowest_address <= o) && (o < g_gc_highest_address)); return (o != nullptr); #endif //FEATURE_BASICFREEZE } #endif //USE_REGIONS inline BOOL gc_heap::gc_mark (uint8_t* o, uint8_t* low, uint8_t* high, int condemned_gen) { #ifdef USE_REGIONS assert (low == 0); assert (high == 0); if (is_in_heap_range (o)) { BOOL already_marked = marked (o); if (already_marked) return FALSE; if (condemned_gen == max_generation) { set_marked (o); return TRUE; } int gen = get_region_gen_num (o); if (gen <= condemned_gen) { set_marked (o); return TRUE; } } return FALSE; #else //USE_REGIONS assert (condemned_gen == -1); BOOL marked = FALSE; if ((o >= low) && (o < high)) marked = gc_mark1 (o); #ifdef MULTIPLE_HEAPS else if (o) { gc_heap* hp = heap_of_gc (o); assert (hp); if ((o >= hp->gc_low) && (o < hp->gc_high)) marked = gc_mark1 (o); } #ifdef SNOOP_STATS snoop_stat.objects_checked_count++; if (marked) { snoop_stat.objects_marked_count++; } if (!o) { snoop_stat.zero_ref_count++; } #endif //SNOOP_STATS #endif //MULTIPLE_HEAPS return marked; #endif //USE_REGIONS } #ifdef BACKGROUND_GC inline BOOL gc_heap::background_marked (uint8_t* o) { return mark_array_marked (o); } inline BOOL gc_heap::background_mark1 (uint8_t* o) { BOOL to_mark = !mark_array_marked (o); dprintf (3, ("b*%Ix*b(%d)", (size_t)o, (to_mark ? 
1 : 0))); if (to_mark) { mark_array_set_marked (o); dprintf (4, ("n*%Ix*n", (size_t)o)); return TRUE; } else return FALSE; } // TODO: we could consider filtering out NULL's here instead of going to // look for it on other heaps inline BOOL gc_heap::background_mark (uint8_t* o, uint8_t* low, uint8_t* high) { BOOL marked = FALSE; if ((o >= low) && (o < high)) marked = background_mark1 (o); #ifdef MULTIPLE_HEAPS else if (o) { gc_heap* hp = heap_of (o); assert (hp); if ((o >= hp->background_saved_lowest_address) && (o < hp->background_saved_highest_address)) marked = background_mark1 (o); } #endif //MULTIPLE_HEAPS return marked; } #endif //BACKGROUND_GC #define new_start() {if (ppstop <= start) {break;} else {parm = start}} #define ignore_start 0 #define use_start 1 #define go_through_object(mt,o,size,parm,start,start_useful,limit,exp) \ { \ CGCDesc* map = CGCDesc::GetCGCDescFromMT((MethodTable*)(mt)); \ CGCDescSeries* cur = map->GetHighestSeries(); \ ptrdiff_t cnt = (ptrdiff_t) map->GetNumSeries(); \ \ if (cnt >= 0) \ { \ CGCDescSeries* last = map->GetLowestSeries(); \ uint8_t** parm = 0; \ do \ { \ assert (parm <= (uint8_t**)((o) + cur->GetSeriesOffset())); \ parm = (uint8_t**)((o) + cur->GetSeriesOffset()); \ uint8_t** ppstop = \ (uint8_t**)((uint8_t*)parm + cur->GetSeriesSize() + (size));\ if (!start_useful || (uint8_t*)ppstop > (start)) \ { \ if (start_useful && (uint8_t*)parm < (start)) parm = (uint8_t**)(start);\ while (parm < ppstop) \ { \ {exp} \ parm++; \ } \ } \ cur--; \ \ } while (cur >= last); \ } \ else \ { \ /* Handle the repeating case - array of valuetypes */ \ uint8_t** parm = (uint8_t**)((o) + cur->startoffset); \ if (start_useful && start > (uint8_t*)parm) \ { \ ptrdiff_t cs = mt->RawGetComponentSize(); \ parm = (uint8_t**)((uint8_t*)parm + (((start) - (uint8_t*)parm)/cs)*cs); \ } \ while ((uint8_t*)parm < ((o)+(size)-plug_skew)) \ { \ for (ptrdiff_t __i = 0; __i > cnt; __i--) \ { \ HALF_SIZE_T skip = cur->val_serie[__i].skip; \ HALF_SIZE_T nptrs = cur->val_serie[__i].nptrs; \ uint8_t** ppstop = parm + nptrs; \ if (!start_useful || (uint8_t*)ppstop > (start)) \ { \ if (start_useful && (uint8_t*)parm < (start)) parm = (uint8_t**)(start); \ do \ { \ {exp} \ parm++; \ } while (parm < ppstop); \ } \ parm = (uint8_t**)((uint8_t*)ppstop + skip); \ } \ } \ } \ } #define go_through_object_nostart(mt,o,size,parm,exp) {go_through_object(mt,o,size,parm,o,ignore_start,(o + size),exp); } // 1 thing to note about this macro: // 1) you can use *parm safely but in general you don't want to use parm // because for the collectible types it's not an address on the managed heap. #ifndef COLLECTIBLE_CLASS #define go_through_object_cl(mt,o,size,parm,exp) \ { \ if (header(o)->ContainsPointers()) \ { \ go_through_object_nostart(mt,o,size,parm,exp); \ } \ } #else //COLLECTIBLE_CLASS #define go_through_object_cl(mt,o,size,parm,exp) \ { \ if (header(o)->Collectible()) \ { \ uint8_t* class_obj = get_class_object (o); \ uint8_t** parm = &class_obj; \ do {exp} while (false); \ } \ if (header(o)->ContainsPointers()) \ { \ go_through_object_nostart(mt,o,size,parm,exp); \ } \ } #endif //COLLECTIBLE_CLASS // This starts a plug. But mark_stack_tos isn't increased until set_pinned_info is called. void gc_heap::enque_pinned_plug (uint8_t* plug, BOOL save_pre_plug_info_p, uint8_t* last_object_in_last_plug) { if (mark_stack_array_length <= mark_stack_tos) { if (!grow_mark_stack (mark_stack_array, mark_stack_array_length, MARK_STACK_INITIAL_LENGTH)) { // we don't want to continue here due to security // risks. 
This happens very rarely and fixing it in the // way so that we can continue is a bit involved and will // not be done in Dev10. GCToEEInterface::HandleFatalError((unsigned int)CORINFO_EXCEPTION_GC); } } dprintf (3, ("enqueuing P #%Id(%Ix): %Ix. oldest: %Id, LO: %Ix, pre: %d", mark_stack_tos, &mark_stack_array[mark_stack_tos], plug, mark_stack_bos, last_object_in_last_plug, (save_pre_plug_info_p ? 1 : 0))); mark& m = mark_stack_array[mark_stack_tos]; m.first = plug; // Must be set now because if we have a short object we'll need the value of saved_pre_p. m.saved_pre_p = save_pre_plug_info_p; if (save_pre_plug_info_p) { // In the case of short plugs or doubly linked free lists, there may be extra bits // set in the method table pointer. // Clear these bits for the copy saved in saved_pre_plug, but not for the copy // saved in saved_pre_plug_reloc. // This is because we need these bits for compaction, but not for mark & sweep. size_t special_bits = clear_special_bits (last_object_in_last_plug); // now copy the bits over memcpy (&(m.saved_pre_plug), &(((plug_and_gap*)plug)[-1]), sizeof (gap_reloc_pair)); // restore the bits in the original set_special_bits (last_object_in_last_plug, special_bits); memcpy (&(m.saved_pre_plug_reloc), &(((plug_and_gap*)plug)[-1]), sizeof (gap_reloc_pair)); // If the last object in the last plug is too short, it requires special handling. size_t last_obj_size = plug - last_object_in_last_plug; if (last_obj_size < min_pre_pin_obj_size) { record_interesting_data_point (idp_pre_short); #ifdef SHORT_PLUGS if (is_plug_padded (last_object_in_last_plug)) record_interesting_data_point (idp_pre_short_padded); #endif //SHORT_PLUGS dprintf (3, ("encountered a short object %Ix right before pinned plug %Ix!", last_object_in_last_plug, plug)); // Need to set the short bit regardless of having refs or not because we need to // indicate that this object is not walkable. m.set_pre_short(); #ifdef COLLECTIBLE_CLASS if (is_collectible (last_object_in_last_plug)) { m.set_pre_short_collectible(); } #endif //COLLECTIBLE_CLASS if (contain_pointers (last_object_in_last_plug)) { dprintf (3, ("short object: %Ix(%Ix)", last_object_in_last_plug, last_obj_size)); go_through_object_nostart (method_table(last_object_in_last_plug), last_object_in_last_plug, last_obj_size, pval, { size_t gap_offset = (((size_t)pval - (size_t)(plug - sizeof (gap_reloc_pair) - plug_skew))) / sizeof (uint8_t*); dprintf (3, ("member: %Ix->%Ix, %Id ptrs from beginning of gap", (uint8_t*)pval, *pval, gap_offset)); m.set_pre_short_bit (gap_offset); } ); } } } m.saved_post_p = FALSE; } void gc_heap::save_post_plug_info (uint8_t* last_pinned_plug, uint8_t* last_object_in_last_plug, uint8_t* post_plug) { #ifndef _DEBUG UNREFERENCED_PARAMETER(last_pinned_plug); #endif //_DEBUG mark& m = mark_stack_array[mark_stack_tos - 1]; assert (last_pinned_plug == m.first); m.saved_post_plug_info_start = (uint8_t*)&(((plug_and_gap*)post_plug)[-1]); // In the case of short plugs or doubly linked free lists, there may be extra bits // set in the method table pointer. // Clear these bits for the copy saved in saved_post_plug, but not for the copy // saved in saved_post_plug_reloc. // This is because we need these bits for compaction, but not for mark & sweep. 
// Note that currently none of these bits will ever be set in the object saved *after* // a pinned plug - this object is currently pinned along with the pinned object before it size_t special_bits = clear_special_bits (last_object_in_last_plug); memcpy (&(m.saved_post_plug), m.saved_post_plug_info_start, sizeof (gap_reloc_pair)); // restore the bits in the original set_special_bits (last_object_in_last_plug, special_bits); memcpy (&(m.saved_post_plug_reloc), m.saved_post_plug_info_start, sizeof (gap_reloc_pair)); // This is important - we need to clear all bits here except the last one. m.saved_post_p = TRUE; #ifdef _DEBUG m.saved_post_plug_debug.gap = 1; #endif //_DEBUG dprintf (3, ("PP %Ix has NP %Ix right after", last_pinned_plug, post_plug)); size_t last_obj_size = post_plug - last_object_in_last_plug; if (last_obj_size < min_pre_pin_obj_size) { dprintf (3, ("PP %Ix last obj %Ix is too short", last_pinned_plug, last_object_in_last_plug)); record_interesting_data_point (idp_post_short); #ifdef SHORT_PLUGS if (is_plug_padded (last_object_in_last_plug)) record_interesting_data_point (idp_post_short_padded); #endif //SHORT_PLUGS m.set_post_short(); #if defined (_DEBUG) && defined (VERIFY_HEAP) verify_pinned_queue_p = TRUE; #endif // _DEBUG && VERIFY_HEAP #ifdef COLLECTIBLE_CLASS if (is_collectible (last_object_in_last_plug)) { m.set_post_short_collectible(); } #endif //COLLECTIBLE_CLASS if (contain_pointers (last_object_in_last_plug)) { dprintf (3, ("short object: %Ix(%Ix)", last_object_in_last_plug, last_obj_size)); // TODO: since we won't be able to walk this object in relocation, we still need to // take care of collectible assemblies here. go_through_object_nostart (method_table(last_object_in_last_plug), last_object_in_last_plug, last_obj_size, pval, { size_t gap_offset = (((size_t)pval - (size_t)(post_plug - sizeof (gap_reloc_pair) - plug_skew))) / sizeof (uint8_t*); dprintf (3, ("member: %Ix->%Ix, %Id ptrs from beginning of gap", (uint8_t*)pval, *pval, gap_offset)); m.set_post_short_bit (gap_offset); } ); } } } //#define PREFETCH #ifdef PREFETCH __declspec(naked) void __fastcall Prefetch(void* addr) { __asm { PREFETCHT0 [ECX] ret }; } #else //PREFETCH inline void Prefetch (void* addr) { UNREFERENCED_PARAMETER(addr); } #endif //PREFETCH #ifdef MH_SC_MARK inline VOLATILE(uint8_t*)& gc_heap::ref_mark_stack (gc_heap* hp, int index) { return ((VOLATILE(uint8_t*)*)(hp->mark_stack_array))[index]; } #endif //MH_SC_MARK #define stolen 2 #define partial 1 #define partial_object 3 inline uint8_t* ref_from_slot (uint8_t* r) { return (uint8_t*)((size_t)r & ~(stolen | partial)); } inline BOOL stolen_p (uint8_t* r) { return (((size_t)r&2) && !((size_t)r&1)); } inline BOOL ready_p (uint8_t* r) { return ((size_t)r != 1); } inline BOOL partial_p (uint8_t* r) { return (((size_t)r&1) && !((size_t)r&2)); } inline BOOL straight_ref_p (uint8_t* r) { return (!stolen_p (r) && !partial_p (r)); } inline BOOL partial_object_p (uint8_t* r) { return (((size_t)r & partial_object) == partial_object); } inline BOOL ref_p (uint8_t* r) { return (straight_ref_p (r) || partial_object_p (r)); } void gc_heap::mark_object_simple1 (uint8_t* oo, uint8_t* start THREAD_NUMBER_DCL) { SERVER_SC_MARK_VOLATILE(uint8_t*)* mark_stack_tos = (SERVER_SC_MARK_VOLATILE(uint8_t*)*)mark_stack_array; SERVER_SC_MARK_VOLATILE(uint8_t*)* mark_stack_limit = (SERVER_SC_MARK_VOLATILE(uint8_t*)*)&mark_stack_array[mark_stack_array_length]; SERVER_SC_MARK_VOLATILE(uint8_t*)* mark_stack_base = mark_stack_tos; #ifdef SORT_MARK_STACK 
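// sorted_tos tracks the boundary below which the mark stack has already been sorted; when the
// unsorted tail grows beyond mark_stack_array_length/8 entries it is re-sorted via rqsort1 further
// down, presumably to improve locality as those entries are popped and scanned.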
SERVER_SC_MARK_VOLATILE(uint8_t*)* sorted_tos = mark_stack_base; #endif //SORT_MARK_STACK // If we are doing a full GC we don't use mark list anyway so use m_boundary_fullgc that doesn't // update mark list. BOOL full_p = (settings.condemned_generation == max_generation); int condemned_gen = #ifdef USE_REGIONS settings.condemned_generation; #else -1; #endif //USE_REGIONS assert ((start >= oo) && (start < oo+size(oo))); #ifndef MH_SC_MARK *mark_stack_tos = oo; #endif //!MH_SC_MARK while (1) { #ifdef MULTIPLE_HEAPS #else //MULTIPLE_HEAPS const int thread = 0; #endif //MULTIPLE_HEAPS if (oo && ((size_t)oo != 4)) { size_t s = 0; if (stolen_p (oo)) { --mark_stack_tos; goto next_level; } else if (!partial_p (oo) && ((s = size (oo)) < (partial_size_th*sizeof (uint8_t*)))) { BOOL overflow_p = FALSE; if (mark_stack_tos + (s) /sizeof (uint8_t*) >= (mark_stack_limit - 1)) { size_t num_components = ((method_table(oo))->HasComponentSize() ? ((CObjectHeader*)oo)->GetNumComponents() : 0); if (mark_stack_tos + CGCDesc::GetNumPointers(method_table(oo), s, num_components) >= (mark_stack_limit - 1)) { overflow_p = TRUE; } } if (overflow_p == FALSE) { dprintf(3,("pushing mark for %Ix ", (size_t)oo)); go_through_object_cl (method_table(oo), oo, s, ppslot, { uint8_t* o = *ppslot; Prefetch(o); if (gc_mark (o, gc_low, gc_high, condemned_gen)) { if (full_p) { m_boundary_fullgc (o); } else { m_boundary (o); } add_to_promoted_bytes (o, thread); if (contain_pointers_or_collectible (o)) { *(mark_stack_tos++) = o; } } } ); } else { dprintf(3,("mark stack overflow for object %Ix ", (size_t)oo)); min_overflow_address = min (min_overflow_address, oo); max_overflow_address = max (max_overflow_address, oo); } } else { if (partial_p (oo)) { start = ref_from_slot (oo); oo = ref_from_slot (*(--mark_stack_tos)); dprintf (4, ("oo: %Ix, start: %Ix\n", (size_t)oo, (size_t)start)); assert ((oo < start) && (start < (oo + size (oo)))); } #ifdef COLLECTIBLE_CLASS else { // If there's a class object, push it now. We are guaranteed to have the slot since // we just popped one object off. if (is_collectible (oo)) { uint8_t* class_obj = get_class_object (oo); if (gc_mark (class_obj, gc_low, gc_high, condemned_gen)) { if (full_p) { m_boundary_fullgc (class_obj); } else { m_boundary (class_obj); } add_to_promoted_bytes (class_obj, thread); *(mark_stack_tos++) = class_obj; // The code below expects that the oo is still stored in the stack slot that was // just popped and it "pushes" it back just by incrementing the mark_stack_tos. // But the class_obj has just overwritten that stack slot and so the oo needs to // be stored to the new slot that's pointed to by the mark_stack_tos. 
*mark_stack_tos = oo; } } if (!contain_pointers (oo)) { goto next_level; } } #endif //COLLECTIBLE_CLASS s = size (oo); BOOL overflow_p = FALSE; if (mark_stack_tos + (num_partial_refs + 2) >= mark_stack_limit) { overflow_p = TRUE; } if (overflow_p == FALSE) { dprintf(3,("pushing mark for %Ix ", (size_t)oo)); //push the object and its current SERVER_SC_MARK_VOLATILE(uint8_t*)* place = ++mark_stack_tos; mark_stack_tos++; #ifdef MH_SC_MARK *(place-1) = 0; *(place) = (uint8_t*)partial; #endif //MH_SC_MARK int i = num_partial_refs; uint8_t* ref_to_continue = 0; go_through_object (method_table(oo), oo, s, ppslot, start, use_start, (oo + s), { uint8_t* o = *ppslot; Prefetch(o); if (gc_mark (o, gc_low, gc_high,condemned_gen)) { if (full_p) { m_boundary_fullgc (o); } else { m_boundary (o); } add_to_promoted_bytes (o, thread); if (contain_pointers_or_collectible (o)) { *(mark_stack_tos++) = o; if (--i == 0) { ref_to_continue = (uint8_t*)((size_t)(ppslot+1) | partial); goto more_to_do; } } } } ); //we are finished with this object assert (ref_to_continue == 0); #ifdef MH_SC_MARK assert ((*(place-1)) == (uint8_t*)0); #else //MH_SC_MARK *(place-1) = 0; #endif //MH_SC_MARK *place = 0; // shouldn't we decrease tos by 2 here?? more_to_do: if (ref_to_continue) { //update the start #ifdef MH_SC_MARK assert ((*(place-1)) == (uint8_t*)0); *(place-1) = (uint8_t*)((size_t)oo | partial_object); assert (((*place) == (uint8_t*)1) || ((*place) == (uint8_t*)2)); #endif //MH_SC_MARK *place = ref_to_continue; } } else { dprintf(3,("mark stack overflow for object %Ix ", (size_t)oo)); min_overflow_address = min (min_overflow_address, oo); max_overflow_address = max (max_overflow_address, oo); } } #ifdef SORT_MARK_STACK if (mark_stack_tos > sorted_tos + mark_stack_array_length/8) { rqsort1 (sorted_tos, mark_stack_tos-1); sorted_tos = mark_stack_tos-1; } #endif //SORT_MARK_STACK } next_level: if (!(mark_stack_empty_p())) { oo = *(--mark_stack_tos); start = oo; #ifdef SORT_MARK_STACK sorted_tos = min ((size_t)sorted_tos, (size_t)mark_stack_tos); #endif //SORT_MARK_STACK } else break; } } #ifdef MH_SC_MARK BOOL same_numa_node_p (int hn1, int hn2) { return (heap_select::find_numa_node_from_heap_no (hn1) == heap_select::find_numa_node_from_heap_no (hn2)); } int find_next_buddy_heap (int this_heap_number, int current_buddy, int n_heaps) { int hn = (current_buddy+1)%n_heaps; while (hn != current_buddy) { if ((this_heap_number != hn) && (same_numa_node_p (this_heap_number, hn))) return hn; hn = (hn+1)%n_heaps; } return current_buddy; } void gc_heap::mark_steal() { mark_stack_busy() = 0; //clear the mark stack in the snooping range for (int i = 0; i < max_snoop_level; i++) { ((VOLATILE(uint8_t*)*)(mark_stack_array))[i] = 0; } //pick the next heap as our buddy int thpn = find_next_buddy_heap (heap_number, heap_number, n_heaps); #ifdef SNOOP_STATS dprintf (SNOOP_LOG, ("(GC%d)heap%d: start snooping %d", settings.gc_index, heap_number, (heap_number+1)%n_heaps)); uint32_t begin_tick = GCToOSInterface::GetLowPrecisionTimeStamp(); #endif //SNOOP_STATS int idle_loop_count = 0; int first_not_ready_level = 0; while (1) { gc_heap* hp = g_heaps [thpn]; int level = first_not_ready_level; first_not_ready_level = 0; while (check_next_mark_stack (hp) && (level < (max_snoop_level-1))) { idle_loop_count = 0; #ifdef SNOOP_STATS snoop_stat.busy_count++; dprintf (SNOOP_LOG, ("heap%d: looking at next heap level %d stack contents: %Ix", heap_number, level, (int)((uint8_t**)(hp->mark_stack_array))[level])); #endif //SNOOP_STATS uint8_t* o = 
ref_mark_stack (hp, level); uint8_t* start = o; if (ref_p (o)) { mark_stack_busy() = 1; BOOL success = TRUE; uint8_t* next = (ref_mark_stack (hp, level+1)); if (ref_p (next)) { if (((size_t)o > 4) && !partial_object_p (o)) { //this is a normal object, not a partial mark tuple //success = (Interlocked::CompareExchangePointer (&ref_mark_stack (hp, level), 0, o)==o); success = (Interlocked::CompareExchangePointer (&ref_mark_stack (hp, level), (uint8_t*)4, o)==o); #ifdef SNOOP_STATS snoop_stat.interlocked_count++; if (success) snoop_stat.normal_count++; #endif //SNOOP_STATS } else { //it is a stolen entry, or beginning/ending of a partial mark level++; #ifdef SNOOP_STATS snoop_stat.stolen_or_pm_count++; #endif //SNOOP_STATS success = FALSE; } } else if (stolen_p (next)) { //ignore the stolen guy and go to the next level success = FALSE; level+=2; #ifdef SNOOP_STATS snoop_stat.stolen_entry_count++; #endif //SNOOP_STATS } else { assert (partial_p (next)); start = ref_from_slot (next); //re-read the object o = ref_from_slot (ref_mark_stack (hp, level)); if (o && start) { //steal the object success = (Interlocked::CompareExchangePointer (&ref_mark_stack (hp, level+1), (uint8_t*)stolen, next) == next); #ifdef SNOOP_STATS snoop_stat.interlocked_count++; if (success) { snoop_stat.partial_mark_parent_count++; } #endif //SNOOP_STATS } else { // stack is not ready, or o is completely different from the last time we read from this stack level. // go up 2 levels to steal children or totally unrelated objects. success = FALSE; if (first_not_ready_level == 0) { first_not_ready_level = level; } level+=2; #ifdef SNOOP_STATS snoop_stat.pm_not_ready_count++; #endif //SNOOP_STATS } } if (success) { #ifdef SNOOP_STATS dprintf (SNOOP_LOG, ("heap%d: marking %Ix from %d [%d] tl:%dms", heap_number, (size_t)o, (heap_number+1)%n_heaps, level, (GCToOSInterface::GetLowPrecisionTimeStamp()-begin_tick))); uint32_t start_tick = GCToOSInterface::GetLowPrecisionTimeStamp(); #endif //SNOOP_STATS mark_object_simple1 (o, start, heap_number); #ifdef SNOOP_STATS dprintf (SNOOP_LOG, ("heap%d: done marking %Ix from %d [%d] %dms tl:%dms", heap_number, (size_t)o, (heap_number+1)%n_heaps, level, (GCToOSInterface::GetLowPrecisionTimeStamp()-start_tick),(GCToOSInterface::GetLowPrecisionTimeStamp()-begin_tick))); #endif //SNOOP_STATS mark_stack_busy() = 0; //clear the mark stack in snooping range for (int i = 0; i < max_snoop_level; i++) { if (((uint8_t**)mark_stack_array)[i] != 0) { ((VOLATILE(uint8_t*)*)(mark_stack_array))[i] = 0; #ifdef SNOOP_STATS snoop_stat.stack_bottom_clear_count++; #endif //SNOOP_STATS } } level = 0; } mark_stack_busy() = 0; } else { //slot is either partial or stolen level++; } } if ((first_not_ready_level != 0) && hp->mark_stack_busy()) { continue; } if (!hp->mark_stack_busy()) { first_not_ready_level = 0; idle_loop_count++; if ((idle_loop_count % (6) )==1) { #ifdef SNOOP_STATS snoop_stat.switch_to_thread_count++; #endif //SNOOP_STATS GCToOSInterface::Sleep(1); } int free_count = 1; #ifdef SNOOP_STATS snoop_stat.stack_idle_count++; //dprintf (SNOOP_LOG, ("heap%d: counting idle threads", heap_number)); #endif //SNOOP_STATS for (int hpn = (heap_number+1)%n_heaps; hpn != heap_number;) { if (!((g_heaps [hpn])->mark_stack_busy())) { free_count++; #ifdef SNOOP_STATS dprintf (SNOOP_LOG, ("heap%d: %d idle", heap_number, free_count)); #endif //SNOOP_STATS } else if (same_numa_node_p (hpn, heap_number) || ((idle_loop_count%1000))==999) { thpn = hpn; break; } hpn = (hpn+1)%n_heaps; YieldProcessor(); } if (free_count == 
n_heaps) { break; } } } } inline BOOL gc_heap::check_next_mark_stack (gc_heap* next_heap) { #ifdef SNOOP_STATS snoop_stat.check_level_count++; #endif //SNOOP_STATS return (next_heap->mark_stack_busy()>=1); } #endif //MH_SC_MARK #ifdef SNOOP_STATS void gc_heap::print_snoop_stat() { dprintf (1234, ("%4s | %8s | %8s | %8s | %8s | %8s | %8s | %8s", "heap", "check", "zero", "mark", "stole", "pstack", "nstack", "nonsk")); dprintf (1234, ("%4d | %8d | %8d | %8d | %8d | %8d | %8d | %8d", snoop_stat.heap_index, snoop_stat.objects_checked_count, snoop_stat.zero_ref_count, snoop_stat.objects_marked_count, snoop_stat.stolen_stack_count, snoop_stat.partial_stack_count, snoop_stat.normal_stack_count, snoop_stat.non_stack_count)); dprintf (1234, ("%4s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s", "heap", "level", "busy", "xchg", "pmparent", "s_pm", "stolen", "nready", "clear")); dprintf (1234, ("%4d | %8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d\n", snoop_stat.heap_index, snoop_stat.check_level_count, snoop_stat.busy_count, snoop_stat.interlocked_count, snoop_stat.partial_mark_parent_count, snoop_stat.stolen_or_pm_count, snoop_stat.stolen_entry_count, snoop_stat.pm_not_ready_count, snoop_stat.normal_count, snoop_stat.stack_bottom_clear_count)); printf ("\n%4s | %8s | %8s | %8s | %8s | %8s\n", "heap", "check", "zero", "mark", "idle", "switch"); printf ("%4d | %8d | %8d | %8d | %8d | %8d\n", snoop_stat.heap_index, snoop_stat.objects_checked_count, snoop_stat.zero_ref_count, snoop_stat.objects_marked_count, snoop_stat.stack_idle_count, snoop_stat.switch_to_thread_count); printf ("%4s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s\n", "heap", "level", "busy", "xchg", "pmparent", "s_pm", "stolen", "nready", "normal", "clear"); printf ("%4d | %8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d\n", snoop_stat.heap_index, snoop_stat.check_level_count, snoop_stat.busy_count, snoop_stat.interlocked_count, snoop_stat.partial_mark_parent_count, snoop_stat.stolen_or_pm_count, snoop_stat.stolen_entry_count, snoop_stat.pm_not_ready_count, snoop_stat.normal_count, snoop_stat.stack_bottom_clear_count); } #endif //SNOOP_STATS #ifdef HEAP_ANALYZE void gc_heap::ha_mark_object_simple (uint8_t** po THREAD_NUMBER_DCL) { if (!internal_root_array) { internal_root_array = new (nothrow) uint8_t* [internal_root_array_length]; if (!internal_root_array) { heap_analyze_success = FALSE; } } if (heap_analyze_success && (internal_root_array_length <= internal_root_array_index)) { size_t new_size = 2*internal_root_array_length; uint64_t available_physical = 0; get_memory_info (NULL, &available_physical); if (new_size > (size_t)(available_physical / 10)) { heap_analyze_success = FALSE; } else { uint8_t** tmp = new (nothrow) uint8_t* [new_size]; if (tmp) { memcpy (tmp, internal_root_array, internal_root_array_length*sizeof (uint8_t*)); delete[] internal_root_array; internal_root_array = tmp; internal_root_array_length = new_size; } else { heap_analyze_success = FALSE; } } } if (heap_analyze_success) { PREFIX_ASSUME(internal_root_array_index < internal_root_array_length); uint8_t* ref = (uint8_t*)po; if (!current_obj || !((ref >= current_obj) && (ref < (current_obj + current_obj_size)))) { gc_heap* hp = gc_heap::heap_of (ref); current_obj = hp->find_object (ref); current_obj_size = size (current_obj); internal_root_array[internal_root_array_index] = current_obj; internal_root_array_index++; } } mark_object_simple (po THREAD_NUMBER_ARG); } #endif //HEAP_ANALYZE //this method assumes that *po is in the [low. 
high[ range void gc_heap::mark_object_simple (uint8_t** po THREAD_NUMBER_DCL) { int condemned_gen = #ifdef USE_REGIONS settings.condemned_generation; #else -1; #endif //USE_REGIONS uint8_t* o = *po; #ifndef MULTIPLE_HEAPS const int thread = 0; #endif //MULTIPLE_HEAPS { #ifdef SNOOP_STATS snoop_stat.objects_checked_count++; #endif //SNOOP_STATS if (gc_mark1 (o)) { m_boundary (o); size_t s = size (o); add_to_promoted_bytes (o, s, thread); { go_through_object_cl (method_table(o), o, s, poo, { uint8_t* oo = *poo; if (gc_mark (oo, gc_low, gc_high, condemned_gen)) { m_boundary (oo); add_to_promoted_bytes (oo, thread); if (contain_pointers_or_collectible (oo)) mark_object_simple1 (oo, oo THREAD_NUMBER_ARG); } } ); } } } } inline void gc_heap::mark_object (uint8_t* o THREAD_NUMBER_DCL) { #ifdef USE_REGIONS if (is_in_heap_range (o) && is_in_condemned_gc (o)) { mark_object_simple (&o THREAD_NUMBER_ARG); } #else //USE_REGIONS if ((o >= gc_low) && (o < gc_high)) mark_object_simple (&o THREAD_NUMBER_ARG); #ifdef MULTIPLE_HEAPS else if (o) { gc_heap* hp = heap_of (o); assert (hp); if ((o >= hp->gc_low) && (o < hp->gc_high)) mark_object_simple (&o THREAD_NUMBER_ARG); } #endif //MULTIPLE_HEAPS #endif //USE_REGIONS } #ifdef BACKGROUND_GC #ifdef USE_REGIONS void gc_heap::set_background_overflow_p (uint8_t* oo) { heap_segment* overflow_region = get_region_info_for_address (oo); overflow_region->flags |= heap_segment_flags_overflow; dprintf (3,("setting overflow flag for region %p", heap_segment_mem (overflow_region))); #ifdef MULTIPLE_HEAPS gc_heap* overflow_heap = heap_segment_heap (overflow_region); #else gc_heap* overflow_heap = nullptr; #endif overflow_heap->background_overflow_p = TRUE; } #endif //USE_REGIONS void gc_heap::background_mark_simple1 (uint8_t* oo THREAD_NUMBER_DCL) { uint8_t** mark_stack_limit = &background_mark_stack_array[background_mark_stack_array_length]; #ifdef SORT_MARK_STACK uint8_t** sorted_tos = background_mark_stack_array; #endif //SORT_MARK_STACK background_mark_stack_tos = background_mark_stack_array; while (1) { #ifdef MULTIPLE_HEAPS #else //MULTIPLE_HEAPS const int thread = 0; #endif //MULTIPLE_HEAPS if (oo) { size_t s = 0; if ((((size_t)oo & 1) == 0) && ((s = size (oo)) < (partial_size_th*sizeof (uint8_t*)))) { BOOL overflow_p = FALSE; if (background_mark_stack_tos + (s) /sizeof (uint8_t*) >= (mark_stack_limit - 1)) { size_t num_components = ((method_table(oo))->HasComponentSize() ? 
((CObjectHeader*)oo)->GetNumComponents() : 0); size_t num_pointers = CGCDesc::GetNumPointers(method_table(oo), s, num_components); if (background_mark_stack_tos + num_pointers >= (mark_stack_limit - 1)) { dprintf (2, ("h%d: %Id left, obj (mt: %Ix) %Id ptrs", heap_number, (size_t)(mark_stack_limit - 1 - background_mark_stack_tos), method_table(oo), num_pointers)); bgc_overflow_count++; overflow_p = TRUE; } } if (overflow_p == FALSE) { dprintf(3,("pushing mark for %Ix ", (size_t)oo)); go_through_object_cl (method_table(oo), oo, s, ppslot, { uint8_t* o = *ppslot; Prefetch(o); if (background_mark (o, background_saved_lowest_address, background_saved_highest_address)) { //m_boundary (o); size_t obj_size = size (o); bpromoted_bytes (thread) += obj_size; if (contain_pointers_or_collectible (o)) { *(background_mark_stack_tos++) = o; } } } ); } else { dprintf (3,("background mark stack overflow for object %Ix ", (size_t)oo)); #ifdef USE_REGIONS set_background_overflow_p (oo); #else //USE_REGIONS background_min_overflow_address = min (background_min_overflow_address, oo); background_max_overflow_address = max (background_max_overflow_address, oo); #endif //USE_REGIONS } } else { uint8_t* start = oo; if ((size_t)oo & 1) { oo = (uint8_t*)((size_t)oo & ~1); start = *(--background_mark_stack_tos); dprintf (4, ("oo: %Ix, start: %Ix\n", (size_t)oo, (size_t)start)); } #ifdef COLLECTIBLE_CLASS else { // If there's a class object, push it now. We are guaranteed to have the slot since // we just popped one object off. if (is_collectible (oo)) { uint8_t* class_obj = get_class_object (oo); if (background_mark (class_obj, background_saved_lowest_address, background_saved_highest_address)) { size_t obj_size = size (class_obj); bpromoted_bytes (thread) += obj_size; *(background_mark_stack_tos++) = class_obj; } } if (!contain_pointers (oo)) { goto next_level; } } #endif //COLLECTIBLE_CLASS s = size (oo); BOOL overflow_p = FALSE; if (background_mark_stack_tos + (num_partial_refs + 2) >= mark_stack_limit) { size_t num_components = ((method_table(oo))->HasComponentSize() ? 
((CObjectHeader*)oo)->GetNumComponents() : 0); size_t num_pointers = CGCDesc::GetNumPointers(method_table(oo), s, num_components); dprintf (2, ("h%d: PM: %Id left, obj %Ix (mt: %Ix) start: %Ix, total: %Id", heap_number, (size_t)(mark_stack_limit - background_mark_stack_tos), oo, method_table(oo), start, num_pointers)); bgc_overflow_count++; overflow_p = TRUE; } if (overflow_p == FALSE) { dprintf(3,("pushing mark for %Ix ", (size_t)oo)); //push the object and its current uint8_t** place = background_mark_stack_tos++; *(place) = start; *(background_mark_stack_tos++) = (uint8_t*)((size_t)oo | 1); int num_pushed_refs = num_partial_refs; int num_processed_refs = num_pushed_refs * 16; go_through_object (method_table(oo), oo, s, ppslot, start, use_start, (oo + s), { uint8_t* o = *ppslot; Prefetch(o); if (background_mark (o, background_saved_lowest_address, background_saved_highest_address)) { //m_boundary (o); size_t obj_size = size (o); bpromoted_bytes (thread) += obj_size; if (contain_pointers_or_collectible (o)) { *(background_mark_stack_tos++) = o; if (--num_pushed_refs == 0) { //update the start *place = (uint8_t*)(ppslot+1); goto more_to_do; } } } if (--num_processed_refs == 0) { // give foreground GC a chance to run *place = (uint8_t*)(ppslot + 1); goto more_to_do; } } ); //we are finished with this object *place = 0; *(place+1) = 0; more_to_do:; } else { dprintf (3,("background mark stack overflow for object %Ix ", (size_t)oo)); #ifdef USE_REGIONS set_background_overflow_p (oo); #else //USE_REGIONS background_min_overflow_address = min (background_min_overflow_address, oo); background_max_overflow_address = max (background_max_overflow_address, oo); #endif //USE_REGIONS } } } #ifdef SORT_MARK_STACK if (background_mark_stack_tos > sorted_tos + mark_stack_array_length/8) { rqsort1 (sorted_tos, background_mark_stack_tos-1); sorted_tos = background_mark_stack_tos-1; } #endif //SORT_MARK_STACK #ifdef COLLECTIBLE_CLASS next_level: #endif // COLLECTIBLE_CLASS allow_fgc(); if (!(background_mark_stack_tos == background_mark_stack_array)) { oo = *(--background_mark_stack_tos); #ifdef SORT_MARK_STACK sorted_tos = (uint8_t**)min ((size_t)sorted_tos, (size_t)background_mark_stack_tos); #endif //SORT_MARK_STACK } else break; } assert (background_mark_stack_tos == background_mark_stack_array); } //this version is different than the foreground GC because //it can't keep pointers to the inside of an object //while calling background_mark_simple1. The object could be moved //by an intervening foreground gc. //this method assumes that *po is in the [low. 
high[ range void gc_heap::background_mark_simple (uint8_t* o THREAD_NUMBER_DCL) { #ifdef MULTIPLE_HEAPS #else //MULTIPLE_HEAPS const int thread = 0; #endif //MULTIPLE_HEAPS { dprintf (3, ("bmarking %Ix", o)); if (background_mark1 (o)) { //m_boundary (o); size_t s = size (o); bpromoted_bytes (thread) += s; if (contain_pointers_or_collectible (o)) { background_mark_simple1 (o THREAD_NUMBER_ARG); } } allow_fgc(); } } inline uint8_t* gc_heap::background_mark_object (uint8_t* o THREAD_NUMBER_DCL) { if ((o >= background_saved_lowest_address) && (o < background_saved_highest_address)) { background_mark_simple (o THREAD_NUMBER_ARG); } else { if (o) { dprintf (3, ("or-%Ix", o)); } } return o; } void gc_heap::background_promote (Object** ppObject, ScanContext* sc, uint32_t flags) { UNREFERENCED_PARAMETER(sc); //in order to save space on the array, mark the object, //knowing that it will be visited later assert (settings.concurrent); THREAD_NUMBER_FROM_CONTEXT; #ifndef MULTIPLE_HEAPS const int thread = 0; #endif //!MULTIPLE_HEAPS uint8_t* o = (uint8_t*)*ppObject; if (o == 0) return; #ifdef DEBUG_DestroyedHandleValue // we can race with destroy handle during concurrent scan if (o == (uint8_t*)DEBUG_DestroyedHandleValue) return; #endif //DEBUG_DestroyedHandleValue HEAP_FROM_THREAD; gc_heap* hp = gc_heap::heap_of (o); if ((o < hp->background_saved_lowest_address) || (o >= hp->background_saved_highest_address)) { return; } if (flags & GC_CALL_INTERIOR) { o = hp->find_object (o); if (o == 0) return; } #ifdef FEATURE_CONSERVATIVE_GC // For conservative GC, a value on stack may point to middle of a free object. // In this case, we don't need to promote the pointer. if (GCConfig::GetConservativeGC() && ((CObjectHeader*)o)->IsFree()) { return; } #endif //FEATURE_CONSERVATIVE_GC #ifdef _DEBUG ((CObjectHeader*)o)->Validate(); #endif //_DEBUG //needs to be called before the marking because it is possible for a foreground //gc to take place during the mark and move the object STRESS_LOG3(LF_GC|LF_GCROOTS, LL_INFO1000000, " GCHeap::Promote: Promote GC Root *%p = %p MT = %pT", ppObject, o, o ? ((Object*) o)->GetGCSafeMethodTable() : NULL); hpt->background_mark_simple (o THREAD_NUMBER_ARG); } //used by the ephemeral collection to scan the local background structures //containing references. void gc_heap::scan_background_roots (promote_func* fn, int hn, ScanContext *pSC) { ScanContext sc; if (pSC == 0) pSC = &sc; pSC->thread_number = hn; BOOL relocate_p = (fn == &GCHeap::Relocate); dprintf (3, ("Scanning background mark list")); //scan mark_list size_t mark_list_finger = 0; while (mark_list_finger < c_mark_list_index) { uint8_t** o = &c_mark_list [mark_list_finger]; if (!relocate_p) { // We may not be able to calculate the size during relocate as POPO // may have written over the object. size_t s = size (*o); assert (Align (s) >= Align (min_obj_size)); dprintf(3,("background root %Ix", (size_t)*o)); } (*fn) ((Object**)o, pSC, 0); mark_list_finger++; } //scan the mark stack dprintf (3, ("Scanning background mark stack")); uint8_t** finger = background_mark_stack_array; while (finger < background_mark_stack_tos) { if ((finger + 1) < background_mark_stack_tos) { // We need to check for the partial mark case here. 
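            // A partially scanned object is recorded on the background mark stack as a pair:
            // the current scan position ("place") followed by the parent object with its low
            // bit set. Both entries have to be updated together so the interior scan position
            // stays consistent with the parent if the parent is relocated.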
uint8_t* parent_obj = *(finger + 1); if ((size_t)parent_obj & 1) { uint8_t* place = *finger; size_t place_offset = 0; uint8_t* real_parent_obj = (uint8_t*)((size_t)parent_obj & ~1); if (relocate_p) { *(finger + 1) = real_parent_obj; place_offset = place - real_parent_obj; dprintf(3,("relocating background root %Ix", (size_t)real_parent_obj)); (*fn) ((Object**)(finger + 1), pSC, 0); real_parent_obj = *(finger + 1); *finger = real_parent_obj + place_offset; *(finger + 1) = (uint8_t*)((size_t)real_parent_obj | 1); dprintf(3,("roots changed to %Ix, %Ix", *finger, *(finger + 1))); } else { uint8_t** temp = &real_parent_obj; dprintf(3,("marking background root %Ix", (size_t)real_parent_obj)); (*fn) ((Object**)temp, pSC, 0); } finger += 2; continue; } } dprintf(3,("background root %Ix", (size_t)*finger)); (*fn) ((Object**)finger, pSC, 0); finger++; } } void gc_heap::grow_bgc_mark_stack (size_t new_size) { if ((background_mark_stack_array_length < new_size) && ((new_size - background_mark_stack_array_length) > (background_mark_stack_array_length / 2))) { dprintf (2, ("h%d: ov grow to %Id", heap_number, new_size)); uint8_t** tmp = new (nothrow) uint8_t* [new_size]; if (tmp) { delete [] background_mark_stack_array; background_mark_stack_array = tmp; background_mark_stack_array_length = new_size; background_mark_stack_tos = background_mark_stack_array; } } } void gc_heap::check_bgc_mark_stack_length() { if ((settings.condemned_generation < (max_generation - 1)) || gc_heap::background_running_p()) return; size_t total_heap_size = get_total_heap_size(); if (total_heap_size < ((size_t)4*1024*1024*1024)) return; #ifdef MULTIPLE_HEAPS int total_heaps = n_heaps; #else int total_heaps = 1; #endif //MULTIPLE_HEAPS size_t size_based_on_heap = total_heap_size / (size_t)(100 * 100 * total_heaps * sizeof (uint8_t*)); size_t new_size = max (background_mark_stack_array_length, size_based_on_heap); grow_bgc_mark_stack (new_size); } uint8_t* gc_heap::background_seg_end (heap_segment* seg, BOOL concurrent_p) { #ifndef USE_REGIONS if (concurrent_p && (seg == saved_overflow_ephemeral_seg)) { // for now we stop at where gen1 started when we started processing return background_min_soh_overflow_address; } else #endif //!USE_REGIONS { return heap_segment_allocated (seg); } } uint8_t* gc_heap::background_first_overflow (uint8_t* min_add, heap_segment* seg, BOOL concurrent_p, BOOL small_object_p) { #ifdef USE_REGIONS return heap_segment_mem (seg); #else uint8_t* o = 0; if (small_object_p) { if (in_range_for_segment (min_add, seg)) { // min_add was the beginning of gen1 when we did the concurrent // overflow. Now we could be in a situation where min_add is // actually the same as allocated for that segment (because // we expanded heap), in which case we can not call // find first on this address or we will AV. 
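            // In that case just hand back min_add unchanged; the caller bounds its scan by
            // background_seg_end and max_add, so nothing in this segment gets examined.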
if (min_add >= heap_segment_allocated (seg)) { return min_add; } else { if (concurrent_p && ((seg == saved_overflow_ephemeral_seg) && (min_add >= background_min_soh_overflow_address))) { return background_min_soh_overflow_address; } else { o = find_first_object (min_add, heap_segment_mem (seg)); return o; } } } } o = max (heap_segment_mem (seg), min_add); return o; #endif //USE_REGIONS } void gc_heap::background_process_mark_overflow_internal (uint8_t* min_add, uint8_t* max_add, BOOL concurrent_p) { if (concurrent_p) { current_bgc_state = bgc_overflow_soh; } size_t total_marked_objects = 0; #ifdef MULTIPLE_HEAPS int thread = heap_number; #endif //MULTIPLE_HEAPS int start_gen_idx = get_start_generation_index(); #ifdef USE_REGIONS if (concurrent_p) start_gen_idx = max_generation; #endif //USE_REGIONS exclusive_sync* loh_alloc_lock = 0; #ifndef USE_REGIONS dprintf (2,("Processing Mark overflow [%Ix %Ix]", (size_t)min_add, (size_t)max_add)); #endif #ifdef MULTIPLE_HEAPS // We don't have each heap scan all heaps concurrently because we are worried about // multiple threads calling things like find_first_object. int h_start = (concurrent_p ? heap_number : 0); int h_end = (concurrent_p ? (heap_number + 1) : n_heaps); for (int hi = h_start; hi < h_end; hi++) { gc_heap* hp = (concurrent_p ? this : g_heaps [(heap_number + hi) % n_heaps]); #else { gc_heap* hp = 0; #endif //MULTIPLE_HEAPS BOOL small_object_segments = TRUE; loh_alloc_lock = hp->bgc_alloc_lock; for (int i = start_gen_idx; i < total_generation_count; i++) { int align_const = get_alignment_constant (small_object_segments); generation* gen = hp->generation_of (i); heap_segment* seg = heap_segment_in_range (generation_start_segment (gen)); PREFIX_ASSUME(seg != NULL); uint8_t* current_min_add = min_add; uint8_t* current_max_add = max_add; while (seg) { #ifdef USE_REGIONS if (heap_segment_overflow_p (seg)) { seg->flags &= ~heap_segment_flags_overflow; current_min_add = heap_segment_mem (seg); current_max_add = heap_segment_allocated (seg); dprintf (2,("Processing Mark overflow [%Ix %Ix]", (size_t)current_min_add, (size_t)current_max_add)); } else { current_min_add = current_max_add = 0; } #endif //USE_REGIONS uint8_t* o = hp->background_first_overflow (current_min_add, seg, concurrent_p, small_object_segments); while ((o < hp->background_seg_end (seg, concurrent_p)) && (o <= current_max_add)) { dprintf (3, ("considering %Ix", (size_t)o)); size_t s; if (concurrent_p && !small_object_segments) { loh_alloc_lock->bgc_mark_set (o); if (((CObjectHeader*)o)->IsFree()) { s = unused_array_size (o); } else { s = size (o); } } else { s = size (o); } if (background_object_marked (o, FALSE) && contain_pointers_or_collectible (o)) { total_marked_objects++; go_through_object_cl (method_table(o), o, s, poo, uint8_t* oo = *poo; background_mark_object (oo THREAD_NUMBER_ARG); ); } if (concurrent_p && !small_object_segments) { loh_alloc_lock->bgc_mark_done (); } o = o + Align (s, align_const); if (concurrent_p) { allow_fgc(); } } #ifdef USE_REGIONS if (current_max_add != 0) #endif //USE_REGIONS { dprintf (2, ("went through overflow objects in segment %Ix (%d) (so far %Id marked)", heap_segment_mem (seg), (small_object_segments ? 
0 : 1), total_marked_objects)); } #ifndef USE_REGIONS if (concurrent_p && (seg == hp->saved_overflow_ephemeral_seg)) { break; } #endif //!USE_REGIONS seg = heap_segment_next_in_range (seg); } if (concurrent_p) { current_bgc_state = bgc_overflow_uoh; } dprintf (2, ("h%d: SOH: ov-mo: %Id", heap_number, total_marked_objects)); fire_overflow_event (min_add, max_add, total_marked_objects, i); if (small_object_segments) { concurrent_print_time_delta (concurrent_p ? "Cov SOH" : "Nov SOH"); } total_marked_objects = 0; small_object_segments = FALSE; } } } BOOL gc_heap::background_process_mark_overflow (BOOL concurrent_p) { BOOL grow_mark_array_p = TRUE; if (concurrent_p) { assert (!processed_eph_overflow_p); #ifndef USE_REGIONS if ((background_max_overflow_address != 0) && (background_min_overflow_address != MAX_PTR)) { // We have overflow to process but we know we can't process the ephemeral generations // now (we actually could process till the current gen1 start but since we are going to // make overflow per segment, for now I'll just stop at the saved gen1 start. saved_overflow_ephemeral_seg = ephemeral_heap_segment; background_max_soh_overflow_address = heap_segment_reserved (saved_overflow_ephemeral_seg); background_min_soh_overflow_address = generation_allocation_start (generation_of (max_generation - 1)); } #endif //!USE_REGIONS } else { #ifndef USE_REGIONS assert ((saved_overflow_ephemeral_seg == 0) || ((background_max_soh_overflow_address != 0) && (background_min_soh_overflow_address != MAX_PTR))); #endif //!USE_REGIONS if (!processed_eph_overflow_p) { // if there was no more overflow we just need to process what we didn't process // on the saved ephemeral segment. #ifdef USE_REGIONS if (!background_overflow_p) #else if ((background_max_overflow_address == 0) && (background_min_overflow_address == MAX_PTR)) #endif //USE_REGIONS { dprintf (2, ("final processing mark overflow - no more overflow since last time")); grow_mark_array_p = FALSE; } #ifdef USE_REGIONS background_overflow_p = TRUE; #else background_min_overflow_address = min (background_min_overflow_address, background_min_soh_overflow_address); background_max_overflow_address = max (background_max_overflow_address, background_max_soh_overflow_address); #endif //!USE_REGIONS processed_eph_overflow_p = TRUE; } } BOOL overflow_p = FALSE; recheck: #ifdef USE_REGIONS if (background_overflow_p) #else if ((! ((background_max_overflow_address == 0)) || ! ((background_min_overflow_address == MAX_PTR)))) #endif { overflow_p = TRUE; if (grow_mark_array_p) { // Try to grow the array. 
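            // Grow by doubling, but once the request passes roughly 100KB cap it at about a
            // tenth of the total heap size so overflow processing cannot grow the stack without
            // bound. grow_bgc_mark_stack only reallocates when the new length is more than half
            // again the current one.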
size_t new_size = max (MARK_STACK_INITIAL_LENGTH, 2*background_mark_stack_array_length); if ((new_size * sizeof(mark)) > 100*1024) { size_t new_max_size = (get_total_heap_size() / 10) / sizeof(mark); new_size = min(new_max_size, new_size); } grow_bgc_mark_stack (new_size); } else { grow_mark_array_p = TRUE; } #ifdef USE_REGIONS uint8_t* min_add = 0; uint8_t* max_add = 0; background_overflow_p = FALSE; #else uint8_t* min_add = background_min_overflow_address; uint8_t* max_add = background_max_overflow_address; background_max_overflow_address = 0; background_min_overflow_address = MAX_PTR; #endif background_process_mark_overflow_internal (min_add, max_add, concurrent_p); if (!concurrent_p) { goto recheck; } } return overflow_p; } #endif //BACKGROUND_GC inline void gc_heap::mark_through_object (uint8_t* oo, BOOL mark_class_object_p THREAD_NUMBER_DCL) { #ifndef COLLECTIBLE_CLASS UNREFERENCED_PARAMETER(mark_class_object_p); BOOL to_mark_class_object = FALSE; #else //COLLECTIBLE_CLASS BOOL to_mark_class_object = (mark_class_object_p && (is_collectible(oo))); #endif //COLLECTIBLE_CLASS if (contain_pointers (oo) || to_mark_class_object) { dprintf(3,( "Marking through %Ix", (size_t)oo)); size_t s = size (oo); #ifdef COLLECTIBLE_CLASS if (to_mark_class_object) { uint8_t* class_obj = get_class_object (oo); mark_object (class_obj THREAD_NUMBER_ARG); } #endif //COLLECTIBLE_CLASS if (contain_pointers (oo)) { go_through_object_nostart (method_table(oo), oo, s, po, uint8_t* o = *po; mark_object (o THREAD_NUMBER_ARG); ); } } } size_t gc_heap::get_total_heap_size() { size_t total_heap_size = 0; // It's correct to start from max_generation for this method because // generation_sizes will return all SOH sizes when passed max_generation. #ifdef MULTIPLE_HEAPS int hn = 0; for (hn = 0; hn < gc_heap::n_heaps; hn++) { gc_heap* hp2 = gc_heap::g_heaps [hn]; for (int i = max_generation; i < total_generation_count; i++) { total_heap_size += hp2->generation_sizes (hp2->generation_of (i)); } } #else for (int i = max_generation; i < total_generation_count; i++) { total_heap_size += generation_sizes (generation_of (i)); } #endif //MULTIPLE_HEAPS return total_heap_size; } size_t gc_heap::get_total_fragmentation() { size_t total_fragmentation = 0; #ifdef MULTIPLE_HEAPS for (int hn = 0; hn < gc_heap::n_heaps; hn++) { gc_heap* hp = gc_heap::g_heaps[hn]; #else //MULTIPLE_HEAPS { gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS for (int i = 0; i < total_generation_count; i++) { generation* gen = hp->generation_of (i); total_fragmentation += (generation_free_list_space (gen) + generation_free_obj_space (gen)); } } return total_fragmentation; } size_t gc_heap::get_total_gen_fragmentation (int gen_number) { size_t total_fragmentation = 0; #ifdef MULTIPLE_HEAPS for (int hn = 0; hn < gc_heap::n_heaps; hn++) { gc_heap* hp = gc_heap::g_heaps[hn]; #else //MULTIPLE_HEAPS { gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS generation* gen = hp->generation_of (gen_number); total_fragmentation += (generation_free_list_space (gen) + generation_free_obj_space (gen)); } return total_fragmentation; } size_t gc_heap::get_total_gen_estimated_reclaim (int gen_number) { size_t total_estimated_reclaim = 0; #ifdef MULTIPLE_HEAPS for (int hn = 0; hn < gc_heap::n_heaps; hn++) { gc_heap* hp = gc_heap::g_heaps[hn]; #else //MULTIPLE_HEAPS { gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS total_estimated_reclaim += hp->estimated_reclaim (gen_number); } return total_estimated_reclaim; } size_t gc_heap::get_total_gen_size (int gen_number) { #ifdef 
MULTIPLE_HEAPS size_t size = 0; for (int hn = 0; hn < gc_heap::n_heaps; hn++) { gc_heap* hp = gc_heap::g_heaps[hn]; size += hp->generation_size (gen_number); } #else size_t size = generation_size (gen_number); #endif //MULTIPLE_HEAPS return size; } size_t gc_heap::committed_size() { size_t total_committed = 0; const size_t kB = 1024; for (int i = get_start_generation_index(); i < total_generation_count; i++) { generation* gen = generation_of (i); heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); size_t gen_committed = 0; size_t gen_allocated = 0; while (seg) { uint8_t* start = #ifdef USE_REGIONS get_region_start (seg); #else (uint8_t*)seg; #endif //USE_REGIONS gen_committed += heap_segment_committed (seg) - start; gen_allocated += heap_segment_allocated (seg) - start; seg = heap_segment_next (seg); } dprintf (3, ("h%d committed in gen%d %IdkB, allocated %IdkB, committed-allocated %IdkB", heap_number, i, gen_committed/kB, gen_allocated/kB, (gen_committed - gen_allocated)/kB)); total_committed += gen_committed; } #ifdef USE_REGIONS size_t committed_in_free = 0; for (int kind = basic_free_region; kind < count_free_region_kinds; kind++) { committed_in_free += free_regions[kind].get_size_committed_in_free(); } dprintf (3, ("h%d committed in free %IdkB", heap_number, committed_in_free/kB)); total_committed += committed_in_free; #endif //USE_REGIONS return total_committed; } size_t gc_heap::get_total_committed_size() { size_t total_committed = 0; #ifdef MULTIPLE_HEAPS int hn = 0; for (hn = 0; hn < gc_heap::n_heaps; hn++) { gc_heap* hp = gc_heap::g_heaps [hn]; total_committed += hp->committed_size(); } #else total_committed = committed_size(); #endif //MULTIPLE_HEAPS return total_committed; } size_t gc_heap::uoh_committed_size (int gen_number, size_t* allocated) { generation* gen = generation_of (gen_number); heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); size_t total_committed = 0; size_t total_allocated = 0; while (seg) { uint8_t* start = #ifdef USE_REGIONS get_region_start (seg); #else (uint8_t*)seg; #endif //USE_REGIONS total_committed += heap_segment_committed (seg) - start; total_allocated += heap_segment_allocated (seg) - start; seg = heap_segment_next (seg); } *allocated = total_allocated; return total_committed; } void gc_heap::get_memory_info (uint32_t* memory_load, uint64_t* available_physical, uint64_t* available_page_file) { GCToOSInterface::GetMemoryStatus(is_restricted_physical_mem ? total_physical_mem : 0, memory_load, available_physical, available_page_file); } //returns TRUE is an overflow happened. BOOL gc_heap::process_mark_overflow(int condemned_gen_number) { size_t last_promoted_bytes = get_promoted_bytes(); BOOL overflow_p = FALSE; recheck: if ((! (max_overflow_address == 0) || ! (min_overflow_address == MAX_PTR))) { overflow_p = TRUE; // Try to grow the array. 
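        // Same growth policy as the background case above: double the mark stack, cap the
        // request near a tenth of the total heap size once it exceeds roughly 100KB, and only
        // reallocate when the new length is more than half again the current one.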
size_t new_size = max (MARK_STACK_INITIAL_LENGTH, 2*mark_stack_array_length); if ((new_size * sizeof(mark)) > 100*1024) { size_t new_max_size = (get_total_heap_size() / 10) / sizeof(mark); new_size = min(new_max_size, new_size); } if ((mark_stack_array_length < new_size) && ((new_size - mark_stack_array_length) > (mark_stack_array_length / 2))) { mark* tmp = new (nothrow) mark [new_size]; if (tmp) { delete mark_stack_array; mark_stack_array = tmp; mark_stack_array_length = new_size; } } uint8_t* min_add = min_overflow_address; uint8_t* max_add = max_overflow_address; max_overflow_address = 0; min_overflow_address = MAX_PTR; process_mark_overflow_internal (condemned_gen_number, min_add, max_add); goto recheck; } size_t current_promoted_bytes = get_promoted_bytes(); if (current_promoted_bytes != last_promoted_bytes) fire_mark_event (ETW::GC_ROOT_OVERFLOW, current_promoted_bytes, last_promoted_bytes); return overflow_p; } void gc_heap::process_mark_overflow_internal (int condemned_gen_number, uint8_t* min_add, uint8_t* max_add) { #ifdef MULTIPLE_HEAPS int thread = heap_number; #endif //MULTIPLE_HEAPS BOOL full_p = (condemned_gen_number == max_generation); dprintf(3,("Processing Mark overflow [%Ix %Ix]", (size_t)min_add, (size_t)max_add)); size_t obj_count = 0; #ifdef MULTIPLE_HEAPS for (int hi = 0; hi < n_heaps; hi++) { gc_heap* hp = g_heaps [(heap_number + hi) % n_heaps]; #else { gc_heap* hp = 0; #endif //MULTIPLE_HEAPS int gen_limit = full_p ? total_generation_count : condemned_gen_number + 1; for (int i = get_stop_generation_index (condemned_gen_number); i < gen_limit; i++) { generation* gen = hp->generation_of (i); heap_segment* seg = heap_segment_in_range (generation_start_segment (gen)); int align_const = get_alignment_constant (i < uoh_start_generation); PREFIX_ASSUME(seg != NULL); while (seg) { uint8_t* o = max (heap_segment_mem (seg), min_add); uint8_t* end = heap_segment_allocated (seg); while ((o < end) && (o <= max_add)) { assert ((min_add <= o) && (max_add >= o)); dprintf (3, ("considering %Ix", (size_t)o)); if (marked (o)) { mark_through_object (o, TRUE THREAD_NUMBER_ARG); obj_count++; } o = o + Align (size (o), align_const); } seg = heap_segment_next_in_range (seg); } } #ifndef MULTIPLE_HEAPS // we should have found at least one object assert (obj_count > 0); #endif //MULTIPLE_HEAPS } } // Scanning for promotion for dependent handles need special handling. Because the primary holds a strong // reference to the secondary (when the primary itself is reachable) and this can cause a cascading series of // promotions (the secondary of one handle is or promotes the primary of another) we might need to perform the // promotion scan multiple times. // This helper encapsulates the logic to complete all dependent handle promotions when running a server GC. It // also has the effect of processing any mark stack overflow. #ifdef MULTIPLE_HEAPS // When multiple heaps are enabled we have must utilize a more complex algorithm in order to keep all the GC // worker threads synchronized. The algorithms are sufficiently divergent that we have different // implementations based on whether MULTIPLE_HEAPS is defined or not. // // Define some static variables used for synchronization in the method below. These should really be defined // locally but MSVC complains when the VOLATILE macro is expanded into an instantiation of the Volatile class. // // A note about the synchronization used within this method. 
Communication between the worker threads is // achieved via two shared booleans (defined below). These both act as latches that are transitioned only from // false -> true by unsynchronized code. They are only read or reset to false by a single thread under the // protection of a join. static VOLATILE(BOOL) s_fUnpromotedHandles = FALSE; static VOLATILE(BOOL) s_fUnscannedPromotions = FALSE; static VOLATILE(BOOL) s_fScanRequired; void gc_heap::scan_dependent_handles (int condemned_gen_number, ScanContext *sc, BOOL initial_scan_p) { // Whenever we call this method there may have been preceding object promotions. So set // s_fUnscannedPromotions unconditionally (during further iterations of the scanning loop this will be set // based on the how the scanning proceeded). s_fUnscannedPromotions = TRUE; // We don't know how many times we need to loop yet. In particular we can't base the loop condition on // the state of this thread's portion of the dependent handle table. That's because promotions on other // threads could cause handle promotions to become necessary here. Even if there are definitely no more // promotions possible in this thread's handles, we still have to stay in lock-step with those worker // threads that haven't finished yet (each GC worker thread has to join exactly the same number of times // as all the others or they'll get out of step). while (true) { // The various worker threads are all currently racing in this code. We need to work out if at least // one of them think they have work to do this cycle. Each thread needs to rescan its portion of the // dependent handle table when both of the following conditions apply: // 1) At least one (arbitrary) object might have been promoted since the last scan (because if this // object happens to correspond to a primary in one of our handles we might potentially have to // promote the associated secondary). // 2) The table for this thread has at least one handle with a secondary that isn't promoted yet. // // The first condition is represented by s_fUnscannedPromotions. This is always non-zero for the first // iteration of this loop (see comment above) and in subsequent cycles each thread updates this // whenever a mark stack overflow occurs or scanning their dependent handles results in a secondary // being promoted. This value is cleared back to zero in a synchronized fashion in the join that // follows below. Note that we can't read this outside of the join since on any iteration apart from // the first threads will be racing between reading this value and completing their previous // iteration's table scan. // // The second condition is tracked by the dependent handle code itself on a per worker thread basis // (and updated by the GcDhReScan() method). We call GcDhUnpromotedHandlesExist() on each thread to // determine the local value and collect the results into the s_fUnpromotedHandles variable in what is // effectively an OR operation. As per s_fUnscannedPromotions we can't read the final result until // we're safely joined. if (GCScan::GcDhUnpromotedHandlesExist(sc)) s_fUnpromotedHandles = TRUE; // Synchronize all the threads so we can read our state variables safely. The shared variable // s_fScanRequired, indicating whether we should scan the tables or terminate the loop, will be set by // a single thread inside the join. gc_t_join.join(this, gc_join_scan_dependent_handles); if (gc_t_join.joined()) { // We're synchronized so it's safe to read our shared state variables. 
We update another shared // variable to indicate to all threads whether we'll be scanning for another cycle or terminating // the loop. We scan if there has been at least one object promotion since last time and at least // one thread has a dependent handle table with a potential handle promotion possible. s_fScanRequired = s_fUnscannedPromotions && s_fUnpromotedHandles; // Reset our shared state variables (ready to be set again on this scan or with a good initial // value for the next call if we're terminating the loop). s_fUnscannedPromotions = FALSE; s_fUnpromotedHandles = FALSE; if (!s_fScanRequired) { // We're terminating the loop. Perform any last operations that require single threaded access. if (!initial_scan_p) { // On the second invocation we reconcile all mark overflow ranges across the heaps. This can help // load balance if some of the heaps have an abnormally large workload. uint8_t* all_heaps_max = 0; uint8_t* all_heaps_min = MAX_PTR; int i; for (i = 0; i < n_heaps; i++) { if (all_heaps_max < g_heaps[i]->max_overflow_address) all_heaps_max = g_heaps[i]->max_overflow_address; if (all_heaps_min > g_heaps[i]->min_overflow_address) all_heaps_min = g_heaps[i]->min_overflow_address; } for (i = 0; i < n_heaps; i++) { g_heaps[i]->max_overflow_address = all_heaps_max; g_heaps[i]->min_overflow_address = all_heaps_min; } } } dprintf(3, ("Starting all gc thread mark stack overflow processing")); gc_t_join.restart(); } // Handle any mark stack overflow: scanning dependent handles relies on all previous object promotions // being visible. If there really was an overflow (process_mark_overflow returns true) then set the // global flag indicating that at least one object promotion may have occurred (the usual comment // about races applies). (Note it's OK to set this flag even if we're about to terminate the loop and // exit the method since we unconditionally set this variable on method entry anyway). if (process_mark_overflow(condemned_gen_number)) s_fUnscannedPromotions = TRUE; // If we decided that no scan was required we can terminate the loop now. if (!s_fScanRequired) break; // Otherwise we must join with the other workers to ensure that all mark stack overflows have been // processed before we start scanning dependent handle tables (if overflows remain while we scan we // could miss noting the promotion of some primary objects). gc_t_join.join(this, gc_join_rescan_dependent_handles); if (gc_t_join.joined()) { dprintf(3, ("Starting all gc thread for dependent handle promotion")); gc_t_join.restart(); } // If the portion of the dependent handle table managed by this worker has handles that could still be // promoted perform a rescan. If the rescan resulted in at least one promotion note this fact since it // could require a rescan of handles on this or other workers. if (GCScan::GcDhUnpromotedHandlesExist(sc)) if (GCScan::GcDhReScan(sc)) s_fUnscannedPromotions = TRUE; } } #else //MULTIPLE_HEAPS // Non-multiple heap version of scan_dependent_handles: much simpler without the need to keep multiple worker // threads synchronized. void gc_heap::scan_dependent_handles (int condemned_gen_number, ScanContext *sc, BOOL initial_scan_p) { UNREFERENCED_PARAMETER(initial_scan_p); // Whenever we call this method there may have been preceding object promotions. So set // fUnscannedPromotions unconditionally (during further iterations of the scanning loop this will be set // based on the how the scanning proceeded). 
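    // The loop below must terminate: each pass either promotes at least one additional
    // secondary (and there are only finitely many objects to promote) or completes without
    // promoting anything new and exits.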
bool fUnscannedPromotions = true; // Loop until there are either no more dependent handles that can have their secondary promoted or we've // managed to perform a scan without promoting anything new. while (GCScan::GcDhUnpromotedHandlesExist(sc) && fUnscannedPromotions) { // On each iteration of the loop start with the assumption that no further objects have been promoted. fUnscannedPromotions = false; // Handle any mark stack overflow: scanning dependent handles relies on all previous object promotions // being visible. If there was an overflow (process_mark_overflow returned true) then additional // objects now appear to be promoted and we should set the flag. if (process_mark_overflow(condemned_gen_number)) fUnscannedPromotions = true; // Perform the scan and set the flag if any promotions resulted. if (GCScan::GcDhReScan(sc)) fUnscannedPromotions = true; } // Process any mark stack overflow that may have resulted from scanning handles (or if we didn't need to // scan any handles at all this is the processing of overflows that may have occurred prior to this method // invocation). process_mark_overflow(condemned_gen_number); } #endif //MULTIPLE_HEAPS size_t gc_heap::get_generation_start_size (int gen_number) { #ifdef USE_REGIONS return 0; #else return Align (size (generation_allocation_start (generation_of (gen_number))), get_alignment_constant (gen_number <= max_generation)); #endif //!USE_REGIONS } inline int gc_heap::get_num_heaps() { #ifdef MULTIPLE_HEAPS return n_heaps; #else return 1; #endif //MULTIPLE_HEAPS } BOOL gc_heap::decide_on_promotion_surv (size_t threshold) { #ifdef MULTIPLE_HEAPS for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; #else //MULTIPLE_HEAPS { gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS dynamic_data* dd = hp->dynamic_data_of (min ((settings.condemned_generation + 1), max_generation)); size_t older_gen_size = dd_current_size (dd) + (dd_desired_allocation (dd) - dd_new_allocation (dd)); size_t promoted = hp->total_promoted_bytes; dprintf (2, ("promotion threshold: %Id, promoted bytes: %Id size n+1: %Id", threshold, promoted, older_gen_size)); if ((threshold > (older_gen_size)) || (promoted > threshold)) { return TRUE; } } return FALSE; } inline void gc_heap::fire_mark_event (int root_type, size_t& current_promoted_bytes, size_t& last_promoted_bytes) { #ifdef FEATURE_EVENT_TRACE if (informational_event_enabled_p) { current_promoted_bytes = get_promoted_bytes(); size_t root_promoted = current_promoted_bytes - last_promoted_bytes; dprintf (3, ("h%d marked root %s: %Id (%Id - %Id)", heap_number, str_root_kinds[root_type], root_promoted, current_promoted_bytes, last_promoted_bytes)); FIRE_EVENT(GCMarkWithType, heap_number, root_type, root_promoted); last_promoted_bytes = current_promoted_bytes; } #endif // FEATURE_EVENT_TRACE } #ifdef FEATURE_EVENT_TRACE inline void gc_heap::record_mark_time (uint64_t& mark_time, uint64_t& current_mark_time, uint64_t& last_mark_time) { if (informational_event_enabled_p) { current_mark_time = GetHighPrecisionTimeStamp(); mark_time = limit_time_to_uint32 (current_mark_time - last_mark_time); dprintf (3, ("%I64d - %I64d = %I64d", current_mark_time, last_mark_time, (current_mark_time - last_mark_time))); last_mark_time = current_mark_time; } } #endif // FEATURE_EVENT_TRACE void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p) { assert (settings.concurrent == FALSE); ScanContext sc; sc.thread_number = heap_number; sc.promotion = TRUE; sc.concurrent = FALSE; dprintf (2, 
(ThreadStressLog::gcStartMarkMsg(), heap_number, condemned_gen_number)); BOOL full_p = (condemned_gen_number == max_generation); int gen_to_init = condemned_gen_number; if (condemned_gen_number == max_generation) { gen_to_init = total_generation_count - 1; } for (int gen_idx = 0; gen_idx <= gen_to_init; gen_idx++) { dynamic_data* dd = dynamic_data_of (gen_idx); dd_begin_data_size (dd) = generation_size (gen_idx) - dd_fragmentation (dd) - #ifdef USE_REGIONS 0; #else get_generation_start_size (gen_idx); #endif //USE_REGIONS dprintf (2, ("begin data size for gen%d is %Id", gen_idx, dd_begin_data_size (dd))); dd_survived_size (dd) = 0; dd_pinned_survived_size (dd) = 0; dd_artificial_pinned_survived_size (dd) = 0; dd_added_pinned_size (dd) = 0; #ifdef SHORT_PLUGS dd_padding_size (dd) = 0; #endif //SHORT_PLUGS #if defined (RESPECT_LARGE_ALIGNMENT) || defined (FEATURE_STRUCTALIGN) dd_num_npinned_plugs (dd) = 0; #endif //RESPECT_LARGE_ALIGNMENT || FEATURE_STRUCTALIGN } if (gen0_must_clear_bricks > 0) gen0_must_clear_bricks--; size_t last_promoted_bytes = 0; size_t current_promoted_bytes = 0; #if !defined(USE_REGIONS) || defined(_DEBUG) init_promoted_bytes(); #endif //!USE_REGIONS || _DEBUG reset_mark_stack(); #ifdef SNOOP_STATS memset (&snoop_stat, 0, sizeof(snoop_stat)); snoop_stat.heap_index = heap_number; #endif //SNOOP_STATS #ifdef MH_SC_MARK if (full_p) { //initialize the mark stack for (int i = 0; i < max_snoop_level; i++) { ((uint8_t**)(mark_stack_array))[i] = 0; } mark_stack_busy() = 1; } #endif //MH_SC_MARK static uint32_t num_sizedrefs = 0; #ifdef MH_SC_MARK static BOOL do_mark_steal_p = FALSE; #endif //MH_SC_MARK #ifdef FEATURE_CARD_MARKING_STEALING reset_card_marking_enumerators(); #endif // FEATURE_CARD_MARKING_STEALING #ifdef STRESS_REGIONS heap_segment* gen0_region = generation_start_segment (generation_of (0)); while (gen0_region) { size_t gen0_region_size = heap_segment_allocated (gen0_region) - heap_segment_mem (gen0_region); if (gen0_region_size > 0) { if ((num_gen0_regions % pinning_seg_interval) == 0) { dprintf (REGIONS_LOG, ("h%d potentially creating pinning in region %Ix", heap_number, heap_segment_mem (gen0_region))); int align_const = get_alignment_constant (TRUE); // Pinning the first and the middle object in the region. 
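                // Walk the objects in the region: pin the first non-free object at or beyond
                // the current boundary, then advance the boundary to roughly the middle of the
                // region so the second pin lands there, and stop after two pins.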
uint8_t* boundary = heap_segment_mem (gen0_region); uint8_t* obj_to_pin = boundary; int num_pinned_objs = 0; while (obj_to_pin < heap_segment_allocated (gen0_region)) { if (obj_to_pin >= boundary && !((CObjectHeader*)obj_to_pin)->IsFree()) { pin_by_gc (obj_to_pin); num_pinned_objs++; if (num_pinned_objs >= 2) break; boundary += (gen0_region_size / 2) + 1; } obj_to_pin += Align (size (obj_to_pin), align_const); } } } num_gen0_regions++; gen0_region = heap_segment_next (gen0_region); } #endif //STRESS_REGIONS #ifdef FEATURE_EVENT_TRACE static uint64_t current_mark_time = 0; static uint64_t last_mark_time = 0; #endif //FEATURE_EVENT_TRACE #ifdef MULTIPLE_HEAPS gc_t_join.join(this, gc_join_begin_mark_phase); if (gc_t_join.joined()) #endif //MULTIPLE_HEAPS { maxgen_size_inc_p = false; #ifdef USE_REGIONS special_sweep_p = false; region_count = global_region_allocator.get_used_region_count(); grow_mark_list_piece(); #endif //USE_REGIONS GCToEEInterface::BeforeGcScanRoots(condemned_gen_number, /* is_bgc */ false, /* is_concurrent */ false); num_sizedrefs = GCToEEInterface::GetTotalNumSizedRefHandles(); #ifdef FEATURE_EVENT_TRACE informational_event_enabled_p = EVENT_ENABLED (GCMarkWithType); if (informational_event_enabled_p) { last_mark_time = GetHighPrecisionTimeStamp(); // We may not have SizedRefs to mark so init it to 0. gc_time_info[time_mark_sizedref] = 0; } #endif //FEATURE_EVENT_TRACE #ifdef MULTIPLE_HEAPS #ifdef MH_SC_MARK if (full_p) { size_t total_heap_size = get_total_heap_size(); if (total_heap_size > (100 * 1024 * 1024)) { do_mark_steal_p = TRUE; } else { do_mark_steal_p = FALSE; } } else { do_mark_steal_p = FALSE; } #endif //MH_SC_MARK gc_t_join.restart(); #endif //MULTIPLE_HEAPS } { //set up the mark lists from g_mark_list assert (g_mark_list); #ifdef MULTIPLE_HEAPS mark_list = &g_mark_list [heap_number*mark_list_size]; #else mark_list = g_mark_list; #endif //MULTIPLE_HEAPS //dont use the mark list for full gc //because multiple segments are more complex to handle and the list //is likely to overflow if (condemned_gen_number < max_generation) mark_list_end = &mark_list [mark_list_size-1]; else mark_list_end = &mark_list [0]; mark_list_index = &mark_list [0]; #ifdef USE_REGIONS if (g_mark_list_piece != nullptr) { #ifdef MULTIPLE_HEAPS // two arrays with alloc_count entries per heap mark_list_piece_start = &g_mark_list_piece[heap_number * 2 * g_mark_list_piece_size]; mark_list_piece_end = &mark_list_piece_start[g_mark_list_piece_size]; #endif //MULTIPLE_HEAPS survived_per_region = (size_t*)&g_mark_list_piece[heap_number * 2 * g_mark_list_piece_size]; old_card_survived_per_region = (size_t*)&survived_per_region[g_mark_list_piece_size]; size_t region_info_to_clear = region_count * sizeof (size_t); memset (survived_per_region, 0, region_info_to_clear); memset (old_card_survived_per_region, 0, region_info_to_clear); } else { #ifdef MULTIPLE_HEAPS // disable use of mark list altogether mark_list_piece_start = nullptr; mark_list_piece_end = nullptr; mark_list_end = &mark_list[0]; #endif //MULTIPLE_HEAPS survived_per_region = nullptr; old_card_survived_per_region = nullptr; } #endif // USE_REGIONS && MULTIPLE_HEAPS #ifndef MULTIPLE_HEAPS shigh = (uint8_t*) 0; slow = MAX_PTR; #endif //MULTIPLE_HEAPS if ((condemned_gen_number == max_generation) && (num_sizedrefs > 0)) { GCScan::GcScanSizedRefs(GCHeap::Promote, condemned_gen_number, max_generation, &sc); fire_mark_event (ETW::GC_ROOT_SIZEDREF, current_promoted_bytes, last_promoted_bytes); #ifdef MULTIPLE_HEAPS gc_t_join.join(this, 
gc_join_scan_sizedref_done); if (gc_t_join.joined()) #endif //MULTIPLE_HEAPS { #ifdef FEATURE_EVENT_TRACE record_mark_time (gc_time_info[time_mark_sizedref], current_mark_time, last_mark_time); #endif //FEATURE_EVENT_TRACE #ifdef MULTIPLE_HEAPS dprintf(3, ("Done with marking all sized refs. Starting all gc thread for marking other strong roots")); gc_t_join.restart(); #endif //MULTIPLE_HEAPS } } dprintf(3,("Marking Roots")); GCScan::GcScanRoots(GCHeap::Promote, condemned_gen_number, max_generation, &sc); fire_mark_event (ETW::GC_ROOT_STACK, current_promoted_bytes, last_promoted_bytes); #ifdef BACKGROUND_GC if (gc_heap::background_running_p()) { scan_background_roots (GCHeap::Promote, heap_number, &sc); fire_mark_event (ETW::GC_ROOT_BGC, current_promoted_bytes, last_promoted_bytes); } #endif //BACKGROUND_GC #ifdef FEATURE_PREMORTEM_FINALIZATION dprintf(3, ("Marking finalization data")); finalize_queue->GcScanRoots(GCHeap::Promote, heap_number, 0); fire_mark_event (ETW::GC_ROOT_FQ, current_promoted_bytes, last_promoted_bytes); #endif // FEATURE_PREMORTEM_FINALIZATION dprintf(3,("Marking handle table")); GCScan::GcScanHandles(GCHeap::Promote, condemned_gen_number, max_generation, &sc); fire_mark_event (ETW::GC_ROOT_HANDLES, current_promoted_bytes, last_promoted_bytes); if (!full_p) { #ifdef USE_REGIONS save_current_survived(); #endif //USE_REGIONS #ifdef FEATURE_CARD_MARKING_STEALING n_eph_soh = 0; n_gen_soh = 0; n_eph_loh = 0; n_gen_loh = 0; #endif //FEATURE_CARD_MARKING_STEALING #ifdef CARD_BUNDLE #ifdef MULTIPLE_HEAPS if (gc_t_join.r_join(this, gc_r_join_update_card_bundle)) { #endif //MULTIPLE_HEAPS #ifndef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES // If we are manually managing card bundles, every write to the card table should already be // accounted for in the card bundle table so there's nothing to update here. 
update_card_table_bundle(); #endif if (card_bundles_enabled()) { verify_card_bundles(); } #ifdef MULTIPLE_HEAPS gc_t_join.r_restart(); } #endif //MULTIPLE_HEAPS #endif //CARD_BUNDLE card_fn mark_object_fn = &gc_heap::mark_object_simple; #ifdef HEAP_ANALYZE heap_analyze_success = TRUE; if (heap_analyze_enabled) { internal_root_array_index = 0; current_obj = 0; current_obj_size = 0; mark_object_fn = &gc_heap::ha_mark_object_simple; } #endif //HEAP_ANALYZE #if defined(MULTIPLE_HEAPS) && defined(FEATURE_CARD_MARKING_STEALING) if (!card_mark_done_soh) #endif // MULTIPLE_HEAPS && FEATURE_CARD_MARKING_STEALING { dprintf (3, ("Marking cross generation pointers on heap %d", heap_number)); mark_through_cards_for_segments(mark_object_fn, FALSE THIS_ARG); #if defined(MULTIPLE_HEAPS) && defined(FEATURE_CARD_MARKING_STEALING) card_mark_done_soh = true; #endif // MULTIPLE_HEAPS && FEATURE_CARD_MARKING_STEALING } #if defined(MULTIPLE_HEAPS) && defined(FEATURE_CARD_MARKING_STEALING) if (!card_mark_done_uoh) #endif // MULTIPLE_HEAPS && FEATURE_CARD_MARKING_STEALING { dprintf (3, ("Marking cross generation pointers for uoh objects on heap %d", heap_number)); for (int i = uoh_start_generation; i < total_generation_count; i++) { #ifndef ALLOW_REFERENCES_IN_POH if (i != poh_generation) #endif //ALLOW_REFERENCES_IN_POH mark_through_cards_for_uoh_objects(mark_object_fn, i, FALSE THIS_ARG); } #if defined(MULTIPLE_HEAPS) && defined(FEATURE_CARD_MARKING_STEALING) card_mark_done_uoh = true; #endif // MULTIPLE_HEAPS && FEATURE_CARD_MARKING_STEALING } #if defined(MULTIPLE_HEAPS) && defined(FEATURE_CARD_MARKING_STEALING) // check the other heaps cyclically and try to help out where the marking isn't done for (int i = 0; i < gc_heap::n_heaps; i++) { int heap_number_to_look_at = (i + heap_number) % gc_heap::n_heaps; gc_heap* hp = gc_heap::g_heaps[heap_number_to_look_at]; if (!hp->card_mark_done_soh) { dprintf(3, ("Marking cross generation pointers on heap %d", hp->heap_number)); hp->mark_through_cards_for_segments(mark_object_fn, FALSE THIS_ARG); hp->card_mark_done_soh = true; } if (!hp->card_mark_done_uoh) { dprintf(3, ("Marking cross generation pointers for large objects on heap %d", hp->heap_number)); for (int i = uoh_start_generation; i < total_generation_count; i++) { #ifndef ALLOW_REFERENCES_IN_POH if (i != poh_generation) #endif //ALLOW_REFERENCES_IN_POH hp->mark_through_cards_for_uoh_objects(mark_object_fn, i, FALSE THIS_ARG); } hp->card_mark_done_uoh = true; } } #endif // MULTIPLE_HEAPS && FEATURE_CARD_MARKING_STEALING #ifdef USE_REGIONS update_old_card_survived(); #endif //USE_REGIONS fire_mark_event (ETW::GC_ROOT_OLDER, current_promoted_bytes, last_promoted_bytes); } } #ifdef MH_SC_MARK if (do_mark_steal_p) { mark_steal(); fire_mark_event (ETW::GC_ROOT_STEAL, current_promoted_bytes, last_promoted_bytes); } #endif //MH_SC_MARK // Dependent handles need to be scanned with a special algorithm (see the header comment on // scan_dependent_handles for more detail). We perform an initial scan without synchronizing with other // worker threads or processing any mark stack overflow. This is not guaranteed to complete the operation // but in a common case (where there are no dependent handles that are due to be collected) it allows us // to optimize away further scans. The call to scan_dependent_handles is what will cycle through more // iterations if required and will also perform processing of any mark stack overflow once the dependent // handle table has been fully promoted. 
GCScan::GcDhInitialScan(GCHeap::Promote, condemned_gen_number, max_generation, &sc); scan_dependent_handles(condemned_gen_number, &sc, true); fire_mark_event (ETW::GC_ROOT_DH_HANDLES, current_promoted_bytes, last_promoted_bytes); #ifdef MULTIPLE_HEAPS dprintf(3, ("Joining for short weak handle scan")); gc_t_join.join(this, gc_join_null_dead_short_weak); if (gc_t_join.joined()) #endif //MULTIPLE_HEAPS { #ifdef FEATURE_EVENT_TRACE record_mark_time (gc_time_info[time_mark_roots], current_mark_time, last_mark_time); #endif //FEATURE_EVENT_TRACE uint64_t promoted_bytes_global = 0; #ifdef HEAP_ANALYZE heap_analyze_enabled = FALSE; #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { promoted_bytes_global += g_heaps[i]->get_promoted_bytes(); } #else promoted_bytes_global = get_promoted_bytes(); #endif //MULTIPLE_HEAPS GCToEEInterface::AnalyzeSurvivorsFinished (settings.gc_index, condemned_gen_number, promoted_bytes_global, GCHeap::ReportGenerationBounds); #endif // HEAP_ANALYZE GCToEEInterface::AfterGcScanRoots (condemned_gen_number, max_generation, &sc); #ifdef MULTIPLE_HEAPS if (!full_p) { // we used r_join and need to reinitialize states for it here. gc_t_join.r_init(); } dprintf(3, ("Starting all gc thread for short weak handle scan")); gc_t_join.restart(); #endif //MULTIPLE_HEAPS } #ifdef FEATURE_CARD_MARKING_STEALING reset_card_marking_enumerators(); if (!full_p) { int generation_skip_ratio_soh = ((n_eph_soh > MIN_SOH_CROSS_GEN_REFS) ? (int)(((float)n_gen_soh / (float)n_eph_soh) * 100) : 100); int generation_skip_ratio_loh = ((n_eph_loh > MIN_LOH_CROSS_GEN_REFS) ? (int)(((float)n_gen_loh / (float)n_eph_loh) * 100) : 100); generation_skip_ratio = min (generation_skip_ratio_soh, generation_skip_ratio_loh); dprintf (2, ("h%d skip ratio soh: %d, loh: %d", heap_number, generation_skip_ratio_soh, generation_skip_ratio_loh)); } #endif // FEATURE_CARD_MARKING_STEALING // null out the target of short weakref that were not promoted. GCScan::GcShortWeakPtrScan (condemned_gen_number, max_generation,&sc); #ifdef MULTIPLE_HEAPS dprintf(3, ("Joining for finalization")); gc_t_join.join(this, gc_join_scan_finalization); if (gc_t_join.joined()) { #endif //MULTIPLE_HEAPS #ifdef FEATURE_EVENT_TRACE record_mark_time (gc_time_info[time_mark_short_weak], current_mark_time, last_mark_time); #endif //FEATURE_EVENT_TRACE #ifdef MULTIPLE_HEAPS dprintf(3, ("Starting all gc thread for Finalization")); gc_t_join.restart(); } #endif //MULTIPLE_HEAPS //Handle finalization. size_t promoted_bytes_live = get_promoted_bytes(); #ifdef FEATURE_PREMORTEM_FINALIZATION dprintf (3, ("Finalize marking")); finalize_queue->ScanForFinalization (GCHeap::Promote, condemned_gen_number, mark_only_p, __this); fire_mark_event (ETW::GC_ROOT_NEW_FQ, current_promoted_bytes, last_promoted_bytes); GCToEEInterface::DiagWalkFReachableObjects(__this); // Scan dependent handles again to promote any secondaries associated with primaries that were promoted // for finalization. As before scan_dependent_handles will also process any mark stack overflow. 
scan_dependent_handles(condemned_gen_number, &sc, false); fire_mark_event (ETW::GC_ROOT_DH_HANDLES, current_promoted_bytes, last_promoted_bytes); #endif //FEATURE_PREMORTEM_FINALIZATION total_promoted_bytes = get_promoted_bytes(); #ifdef MULTIPLE_HEAPS static VOLATILE(int32_t) syncblock_scan_p; dprintf(3, ("Joining for weak pointer deletion")); gc_t_join.join(this, gc_join_null_dead_long_weak); if (gc_t_join.joined()) { dprintf(3, ("Starting all gc thread for weak pointer deletion")); #endif //MULTIPLE_HEAPS #ifdef FEATURE_EVENT_TRACE record_mark_time (gc_time_info[time_mark_scan_finalization], current_mark_time, last_mark_time); #endif //FEATURE_EVENT_TRACE #ifdef USE_REGIONS sync_promoted_bytes(); equalize_promoted_bytes(); #endif //USE_REGIONS #ifdef MULTIPLE_HEAPS syncblock_scan_p = 0; gc_t_join.restart(); } #endif //MULTIPLE_HEAPS // null out the target of long weakref that were not promoted. GCScan::GcWeakPtrScan (condemned_gen_number, max_generation, &sc); #ifdef MULTIPLE_HEAPS size_t total_mark_list_size = sort_mark_list(); // first thread to finish sorting will scan the sync syncblk cache if ((syncblock_scan_p == 0) && (Interlocked::Increment(&syncblock_scan_p) == 1)) #endif //MULTIPLE_HEAPS { // scan for deleted entries in the syncblk cache GCScan::GcWeakPtrScanBySingleThread(condemned_gen_number, max_generation, &sc); } #ifdef MULTIPLE_HEAPS dprintf (3, ("Joining for sync block cache entry scanning")); gc_t_join.join(this, gc_join_null_dead_syncblk); if (gc_t_join.joined()) #endif //MULTIPLE_HEAPS { #ifdef FEATURE_EVENT_TRACE record_mark_time (gc_time_info[time_plan - 1], current_mark_time, last_mark_time); gc_time_info[time_plan] = last_mark_time; #endif //FEATURE_EVENT_TRACE //decide on promotion if (!settings.promotion) { size_t m = 0; for (int n = 0; n <= condemned_gen_number;n++) { #ifdef MULTIPLE_HEAPS m += (size_t)(dd_min_size (dynamic_data_of (n))*(n+1)*0.1); #else m += (size_t)(dd_min_size (dynamic_data_of (n))*(n+1)*0.06); #endif //MULTIPLE_HEAPS } settings.promotion = decide_on_promotion_surv (m); } #ifdef MULTIPLE_HEAPS #ifdef SNOOP_STATS if (do_mark_steal_p) { size_t objects_checked_count = 0; size_t zero_ref_count = 0; size_t objects_marked_count = 0; size_t check_level_count = 0; size_t busy_count = 0; size_t interlocked_count = 0; size_t partial_mark_parent_count = 0; size_t stolen_or_pm_count = 0; size_t stolen_entry_count = 0; size_t pm_not_ready_count = 0; size_t normal_count = 0; size_t stack_bottom_clear_count = 0; for (int i = 0; i < n_heaps; i++) { gc_heap* hp = g_heaps[i]; hp->print_snoop_stat(); objects_checked_count += hp->snoop_stat.objects_checked_count; zero_ref_count += hp->snoop_stat.zero_ref_count; objects_marked_count += hp->snoop_stat.objects_marked_count; check_level_count += hp->snoop_stat.check_level_count; busy_count += hp->snoop_stat.busy_count; interlocked_count += hp->snoop_stat.interlocked_count; partial_mark_parent_count += hp->snoop_stat.partial_mark_parent_count; stolen_or_pm_count += hp->snoop_stat.stolen_or_pm_count; stolen_entry_count += hp->snoop_stat.stolen_entry_count; pm_not_ready_count += hp->snoop_stat.pm_not_ready_count; normal_count += hp->snoop_stat.normal_count; stack_bottom_clear_count += hp->snoop_stat.stack_bottom_clear_count; } fflush (stdout); printf ("-------total stats-------\n"); printf ("%8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s\n", "checked", "zero", "marked", "level", "busy", "xchg", "pmparent", "s_pm", "stolen", "nready", "normal", "clear"); printf ("%8d | %8d | %8d | %8d | %8d | 
%8d | %8d | %8d | %8d | %8d | %8d | %8d\n", objects_checked_count, zero_ref_count, objects_marked_count, check_level_count, busy_count, interlocked_count, partial_mark_parent_count, stolen_or_pm_count, stolen_entry_count, pm_not_ready_count, normal_count, stack_bottom_clear_count); } #endif //SNOOP_STATS dprintf(3, ("Starting all threads for end of mark phase")); gc_t_join.restart(); #endif //MULTIPLE_HEAPS } #if defined(MULTIPLE_HEAPS) && !defined(USE_REGIONS) merge_mark_lists (total_mark_list_size); #endif //MULTIPLE_HEAPS && !USE_REGIONS finalization_promoted_bytes = total_promoted_bytes - promoted_bytes_live; dprintf(2,("---- End of mark phase ----")); } inline void gc_heap::pin_object (uint8_t* o, uint8_t** ppObject) { dprintf (3, ("Pinning %Ix->%Ix", (size_t)ppObject, (size_t)o)); set_pinned (o); #ifdef FEATURE_EVENT_TRACE if(EVENT_ENABLED(PinObjectAtGCTime)) { fire_etw_pin_object_event(o, ppObject); } #endif // FEATURE_EVENT_TRACE num_pinned_objects++; } size_t gc_heap::get_total_pinned_objects() { #ifdef MULTIPLE_HEAPS size_t total_num_pinned_objects = 0; for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; total_num_pinned_objects += hp->num_pinned_objects; } return total_num_pinned_objects; #else //MULTIPLE_HEAPS return num_pinned_objects; #endif //MULTIPLE_HEAPS } void gc_heap::reinit_pinned_objects() { #ifdef MULTIPLE_HEAPS for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap::g_heaps[i]->num_pinned_objects = 0; } #else //MULTIPLE_HEAPS num_pinned_objects = 0; #endif //MULTIPLE_HEAPS } void gc_heap::reset_mark_stack () { reset_pinned_queue(); max_overflow_address = 0; min_overflow_address = MAX_PTR; } #ifdef FEATURE_STRUCTALIGN // // The word with left child, right child, and align info is laid out as follows: // // | upper short word | lower short word | // |<------------> <----->|<------------> <----->| // | left child info hi| right child info lo| // x86: | 10 bits 6 bits| 10 bits 6 bits| // // where left/right child are signed values and concat(info hi, info lo) is unsigned. // // The "align info" encodes two numbers: the required alignment (a power of two) // and the misalignment (the number of machine words the destination address needs // to be adjusted by to provide alignment - so this number is always smaller than // the required alignment). Thus, the two can be represented as the "logical or" // of the two numbers. Note that the actual pad is computed from the misalignment // by adding the alignment iff the misalignment is non-zero and less than min_obj_size. // // The number of bits in a brick. #if defined (TARGET_AMD64) #define brick_bits (12) #else #define brick_bits (11) #endif //TARGET_AMD64 C_ASSERT(brick_size == (1 << brick_bits)); // The number of bits needed to represent the offset to a child node. // "brick_bits + 1" allows us to represent a signed offset within a brick. #define child_bits (brick_bits + 1 - LOG2_PTRSIZE) // The number of bits in each of the pad hi, pad lo fields. 
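// For example: on x86, brick_bits = 11 and LOG2_PTRSIZE = 2, so child_bits = 11 + 1 - 2 = 10
// and pad_bits (defined just below) = 16 - 10 = 6; on AMD64, brick_bits = 12 and
// LOG2_PTRSIZE = 3, which gives the same 10-bit/6-bit split shown in the layout diagram above.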
#define pad_bits (sizeof(short) * 8 - child_bits)

#define child_from_short(w) (((signed short)(w) / (1 << (pad_bits - LOG2_PTRSIZE))) & ~((1 << LOG2_PTRSIZE) - 1))
#define pad_mask ((1 << pad_bits) - 1)
#define pad_from_short(w) ((size_t)(w) & pad_mask)
#else // FEATURE_STRUCTALIGN
#define child_from_short(w) (w)
#endif // FEATURE_STRUCTALIGN

inline
short node_left_child(uint8_t* node)
{
    return child_from_short(((plug_and_pair*)node)[-1].m_pair.left);
}

inline
void set_node_left_child(uint8_t* node, ptrdiff_t val)
{
    assert (val > -(ptrdiff_t)brick_size);
    assert (val < (ptrdiff_t)brick_size);
    assert (Aligned (val));
#ifdef FEATURE_STRUCTALIGN
    size_t pad = pad_from_short(((plug_and_pair*)node)[-1].m_pair.left);
    ((plug_and_pair*)node)[-1].m_pair.left = ((short)val << (pad_bits - LOG2_PTRSIZE)) | (short)pad;
#else // FEATURE_STRUCTALIGN
    ((plug_and_pair*)node)[-1].m_pair.left = (short)val;
#endif // FEATURE_STRUCTALIGN
    assert (node_left_child (node) == val);
}

inline
short node_right_child(uint8_t* node)
{
    return child_from_short(((plug_and_pair*)node)[-1].m_pair.right);
}

inline
void set_node_right_child(uint8_t* node, ptrdiff_t val)
{
    assert (val > -(ptrdiff_t)brick_size);
    assert (val < (ptrdiff_t)brick_size);
    assert (Aligned (val));
#ifdef FEATURE_STRUCTALIGN
    size_t pad = pad_from_short(((plug_and_pair*)node)[-1].m_pair.right);
    ((plug_and_pair*)node)[-1].m_pair.right = ((short)val << (pad_bits - LOG2_PTRSIZE)) | (short)pad;
#else // FEATURE_STRUCTALIGN
    ((plug_and_pair*)node)[-1].m_pair.right = (short)val;
#endif // FEATURE_STRUCTALIGN
    assert (node_right_child (node) == val);
}

#ifdef FEATURE_STRUCTALIGN
void node_aligninfo (uint8_t* node, int& requiredAlignment, ptrdiff_t& pad)
{
    // Extract the single-number aligninfo from the fields.
    short left = ((plug_and_pair*)node)[-1].m_pair.left;
    short right = ((plug_and_pair*)node)[-1].m_pair.right;
    ptrdiff_t pad_shifted = (pad_from_short(left) << pad_bits) | pad_from_short(right);
    ptrdiff_t aligninfo = pad_shifted * DATA_ALIGNMENT;

    // Replicate the topmost bit into all lower bits.
    ptrdiff_t x = aligninfo;
    x |= x >> 8;
    x |= x >> 4;
    x |= x >> 2;
    x |= x >> 1;

    // Clear all bits but the highest.
    requiredAlignment = (int)(x ^ (x >> 1));
    pad = aligninfo - requiredAlignment;
    pad += AdjustmentForMinPadSize(pad, requiredAlignment);
}

inline
ptrdiff_t node_alignpad (uint8_t* node)
{
    int requiredAlignment;
    ptrdiff_t alignpad;
    node_aligninfo (node, requiredAlignment, alignpad);
    return alignpad;
}

void clear_node_aligninfo (uint8_t* node)
{
    ((plug_and_pair*)node)[-1].m_pair.left &= ~0 << pad_bits;
    ((plug_and_pair*)node)[-1].m_pair.right &= ~0 << pad_bits;
}

void set_node_aligninfo (uint8_t* node, int requiredAlignment, ptrdiff_t pad)
{
    // Encode the alignment requirement and alignment offset as a single number
    // as described above.
    ptrdiff_t aligninfo = (size_t)requiredAlignment + (pad & (requiredAlignment-1));
    assert (Aligned (aligninfo));
    ptrdiff_t aligninfo_shifted = aligninfo / DATA_ALIGNMENT;
    assert (aligninfo_shifted < (1 << (pad_bits + pad_bits)));

    ptrdiff_t hi = aligninfo_shifted >> pad_bits;
    assert (pad_from_short(((plug_and_gap*)node)[-1].m_pair.left) == 0);
    ((plug_and_pair*)node)[-1].m_pair.left |= hi;

    ptrdiff_t lo = aligninfo_shifted & pad_mask;
    assert (pad_from_short(((plug_and_gap*)node)[-1].m_pair.right) == 0);
    ((plug_and_pair*)node)[-1].m_pair.right |= lo;

#ifdef _DEBUG
    int requiredAlignment2;
    ptrdiff_t pad2;
    node_aligninfo (node, requiredAlignment2, pad2);
    assert (requiredAlignment == requiredAlignment2);
    assert (pad == pad2);
#endif // _DEBUG
}
#endif // FEATURE_STRUCTALIGN

inline
void loh_set_node_relocation_distance(uint8_t* node, ptrdiff_t val)
{
    ptrdiff_t* place = &(((loh_obj_and_pad*)node)[-1].reloc);
    *place = val;
}

inline
ptrdiff_t loh_node_relocation_distance(uint8_t* node)
{
    return (((loh_obj_and_pad*)node)[-1].reloc);
}

inline
ptrdiff_t node_relocation_distance (uint8_t* node)
{
    return (((plug_and_reloc*)(node))[-1].reloc & ~3);
}

inline
void set_node_relocation_distance(uint8_t* node, ptrdiff_t val)
{
    assert (val == (val & ~3));
    ptrdiff_t* place = &(((plug_and_reloc*)node)[-1].reloc);
    //clear the left bit and the relocation field
    *place &= 1;
    *place |= val;
}

#define node_left_p(node) (((plug_and_reloc*)(node))[-1].reloc & 2)

#define set_node_left(node) ((plug_and_reloc*)(node))[-1].reloc |= 2;

#ifndef FEATURE_STRUCTALIGN
void set_node_realigned(uint8_t* node)
{
    ((plug_and_reloc*)(node))[-1].reloc |= 1;
}

void clear_node_realigned(uint8_t* node)
{
#ifdef RESPECT_LARGE_ALIGNMENT
    ((plug_and_reloc*)(node))[-1].reloc &= ~1;
#else
    UNREFERENCED_PARAMETER(node);
#endif //RESPECT_LARGE_ALIGNMENT
}
#endif // FEATURE_STRUCTALIGN

inline
size_t node_gap_size (uint8_t* node)
{
    return ((plug_and_gap *)node)[-1].gap;
}

void set_gap_size (uint8_t* node, size_t size)
{
    assert (Aligned (size));

    // clear the 2 uint32_t used by the node.
    ((plug_and_gap *)node)[-1].reloc = 0;
    ((plug_and_gap *)node)[-1].lr = 0;
    ((plug_and_gap *)node)[-1].gap = size;

    assert ((size == 0 )||(size >= sizeof(plug_and_reloc)));
}

uint8_t* gc_heap::insert_node (uint8_t* new_node, size_t sequence_number,
                               uint8_t* tree, uint8_t* last_node)
{
    dprintf (3, ("IN: %Ix(%Ix), T: %Ix(%Ix), L: %Ix(%Ix) [%Ix]",
                 (size_t)new_node, brick_of(new_node),
                 (size_t)tree, brick_of(tree),
                 (size_t)last_node, brick_of(last_node),
                 sequence_number));
    if (power_of_two_p (sequence_number))
    {
        set_node_left_child (new_node, (tree - new_node));
        dprintf (3, ("NT: %Ix, LC->%Ix", (size_t)new_node, (tree - new_node)));
        tree = new_node;
    }
    else
    {
        if (oddp (sequence_number))
        {
            set_node_right_child (last_node, (new_node - last_node));
            dprintf (3, ("%Ix RC->%Ix", last_node, (new_node - last_node)));
        }
        else
        {
            uint8_t* earlier_node = tree;
            size_t imax = logcount(sequence_number) - 2;
            for (size_t i = 0; i != imax; i++)
            {
                earlier_node = earlier_node + node_right_child (earlier_node);
            }
            int tmp_offset = node_right_child (earlier_node);
            assert (tmp_offset); // should never be empty
            set_node_left_child (new_node, ((earlier_node + tmp_offset ) - new_node));
            set_node_right_child (earlier_node, (new_node - earlier_node));

            dprintf (3, ("%Ix LC->%Ix, %Ix RC->%Ix", new_node,
                         ((earlier_node + tmp_offset ) - new_node),
                         earlier_node, (new_node - earlier_node)));
        }
    }
    return tree;
}

size_t gc_heap::update_brick_table (uint8_t* tree, size_t current_brick,
                                    uint8_t* x, uint8_t* plug_end)
{
    dprintf (3, ("tree: %Ix, current b: %Ix, x: %Ix, plug_end: %Ix",
                 tree, current_brick, x, plug_end));

    if (tree != NULL)
    {
        dprintf (3, ("b- %Ix->%Ix pointing to tree %Ix",
                     current_brick, (size_t)(tree - brick_address (current_brick)), tree));
        set_brick (current_brick, (tree - brick_address (current_brick)));
    }
    else
    {
        dprintf (3, ("b- %Ix->-1", current_brick));
        set_brick (current_brick, -1);
    }
    size_t b = 1 + current_brick;
    ptrdiff_t offset = 0;
    size_t last_br = brick_of (plug_end-1);
    current_brick = brick_of (x-1);
    dprintf (3, ("ubt: %Ix->%Ix]->%Ix]", b, last_br, current_brick));
    while (b <= current_brick)
    {
        if (b <= last_br)
        {
            set_brick (b, --offset);
        }
        else
        {
            set_brick (b,-1);
        }
        b++;
    }
    return brick_of (x);
}

#ifndef USE_REGIONS
void gc_heap::plan_generation_start (generation* gen, generation* consing_gen, uint8_t* next_plug_to_allocate)
{
#ifdef HOST_64BIT
    // We should never demote big plugs to gen0.
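    // The block below walks the pin queue for the ephemeral segment: when a pinned plug
    // longer than demotion_plug_len_th is found, every pin up to and including it is
    // dequeued and the consing generation's allocation pointer is advanced past it, so
    // those large pinned plugs end up planned into an older generation instead of gen0.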
if (gen == youngest_generation) { heap_segment* seg = ephemeral_heap_segment; size_t mark_stack_large_bos = mark_stack_bos; size_t large_plug_pos = 0; while (mark_stack_large_bos < mark_stack_tos) { if (mark_stack_array[mark_stack_large_bos].len > demotion_plug_len_th) { while (mark_stack_bos <= mark_stack_large_bos) { size_t entry = deque_pinned_plug(); size_t len = pinned_len (pinned_plug_of (entry)); uint8_t* plug = pinned_plug (pinned_plug_of(entry)); if (len > demotion_plug_len_th) { dprintf (2, ("ps(%d): S %Ix (%Id)(%Ix)", gen->gen_num, plug, len, (plug+len))); } pinned_len (pinned_plug_of (entry)) = plug - generation_allocation_pointer (consing_gen); assert(mark_stack_array[entry].len == 0 || mark_stack_array[entry].len >= Align(min_obj_size)); generation_allocation_pointer (consing_gen) = plug + len; generation_allocation_limit (consing_gen) = heap_segment_plan_allocated (seg); set_allocator_next_pin (consing_gen); } } mark_stack_large_bos++; } } #endif // HOST_64BIT generation_plan_allocation_start (gen) = allocate_in_condemned_generations (consing_gen, Align (min_obj_size), -1); generation_plan_allocation_start_size (gen) = Align (min_obj_size); size_t allocation_left = (size_t)(generation_allocation_limit (consing_gen) - generation_allocation_pointer (consing_gen)); if (next_plug_to_allocate) { size_t dist_to_next_plug = (size_t)(next_plug_to_allocate - generation_allocation_pointer (consing_gen)); if (allocation_left > dist_to_next_plug) { allocation_left = dist_to_next_plug; } } if (allocation_left < Align (min_obj_size)) { generation_plan_allocation_start_size (gen) += allocation_left; generation_allocation_pointer (consing_gen) += allocation_left; } dprintf (2, ("plan alloc gen%d(%Ix) start at %Ix (ptr: %Ix, limit: %Ix, next: %Ix)", gen->gen_num, generation_plan_allocation_start (gen), generation_plan_allocation_start_size (gen), generation_allocation_pointer (consing_gen), generation_allocation_limit (consing_gen), next_plug_to_allocate)); } void gc_heap::realloc_plan_generation_start (generation* gen, generation* consing_gen) { BOOL adjacentp = FALSE; generation_plan_allocation_start (gen) = allocate_in_expanded_heap (consing_gen, Align(min_obj_size), adjacentp, 0, #ifdef SHORT_PLUGS FALSE, NULL, #endif //SHORT_PLUGS FALSE, -1 REQD_ALIGN_AND_OFFSET_ARG); generation_plan_allocation_start_size (gen) = Align (min_obj_size); size_t allocation_left = (size_t)(generation_allocation_limit (consing_gen) - generation_allocation_pointer (consing_gen)); if ((allocation_left < Align (min_obj_size)) && (generation_allocation_limit (consing_gen)!=heap_segment_plan_allocated (generation_allocation_segment (consing_gen)))) { generation_plan_allocation_start_size (gen) += allocation_left; generation_allocation_pointer (consing_gen) += allocation_left; } dprintf (1, ("plan re-alloc gen%d start at %Ix (ptr: %Ix, limit: %Ix)", gen->gen_num, generation_plan_allocation_start (consing_gen), generation_allocation_pointer (consing_gen), generation_allocation_limit (consing_gen))); } void gc_heap::plan_generation_starts (generation*& consing_gen) { //make sure that every generation has a planned allocation start int gen_number = settings.condemned_generation; while (gen_number >= 0) { if (gen_number < max_generation) { consing_gen = ensure_ephemeral_heap_segment (consing_gen); } generation* gen = generation_of (gen_number); if (0 == generation_plan_allocation_start (gen)) { plan_generation_start (gen, consing_gen, 0); assert (generation_plan_allocation_start (gen)); } gen_number--; } // now we know 
the planned allocation size heap_segment_plan_allocated (ephemeral_heap_segment) = generation_allocation_pointer (consing_gen); } void gc_heap::advance_pins_for_demotion (generation* gen) { uint8_t* original_youngest_start = generation_allocation_start (youngest_generation); heap_segment* seg = ephemeral_heap_segment; if ((!(pinned_plug_que_empty_p()))) { size_t gen1_pinned_promoted = generation_pinned_allocation_compact_size (generation_of (max_generation)); size_t gen1_pins_left = dd_pinned_survived_size (dynamic_data_of (max_generation - 1)) - gen1_pinned_promoted; size_t total_space_to_skip = last_gen1_pin_end - generation_allocation_pointer (gen); float pin_frag_ratio = (float)gen1_pins_left / (float)total_space_to_skip; float pin_surv_ratio = (float)gen1_pins_left / (float)(dd_survived_size (dynamic_data_of (max_generation - 1))); if ((pin_frag_ratio > 0.15) && (pin_surv_ratio > 0.30)) { while (!pinned_plug_que_empty_p() && (pinned_plug (oldest_pin()) < original_youngest_start)) { size_t entry = deque_pinned_plug(); size_t len = pinned_len (pinned_plug_of (entry)); uint8_t* plug = pinned_plug (pinned_plug_of(entry)); pinned_len (pinned_plug_of (entry)) = plug - generation_allocation_pointer (gen); assert(mark_stack_array[entry].len == 0 || mark_stack_array[entry].len >= Align(min_obj_size)); generation_allocation_pointer (gen) = plug + len; generation_allocation_limit (gen) = heap_segment_plan_allocated (seg); set_allocator_next_pin (gen); //Add the size of the pinned plug to the right pinned allocations //find out which gen this pinned plug came from int frgn = object_gennum (plug); if ((frgn != (int)max_generation) && settings.promotion) { int togn = object_gennum_plan (plug); generation_pinned_allocation_sweep_size ((generation_of (frgn +1))) += len; if (frgn < togn) { generation_pinned_allocation_compact_size (generation_of (togn)) += len; } } dprintf (2, ("skipping gap %d, pin %Ix (%Id)", pinned_len (pinned_plug_of (entry)), plug, len)); } } dprintf (2, ("ad_p_d: PL: %Id, SL: %Id, pfr: %d, psr: %d", gen1_pins_left, total_space_to_skip, (int)(pin_frag_ratio*100), (int)(pin_surv_ratio*100))); } } void gc_heap::process_ephemeral_boundaries (uint8_t* x, int& active_new_gen_number, int& active_old_gen_number, generation*& consing_gen, BOOL& allocate_in_condemned) { retry: if ((active_old_gen_number > 0) && (x >= generation_allocation_start (generation_of (active_old_gen_number - 1)))) { dprintf (2, ("crossing gen%d, x is %Ix", active_old_gen_number - 1, x)); if (!pinned_plug_que_empty_p()) { dprintf (2, ("oldest pin: %Ix(%Id)", pinned_plug (oldest_pin()), (x - pinned_plug (oldest_pin())))); } if (active_old_gen_number <= (settings.promotion ? (max_generation - 1) : max_generation)) { active_new_gen_number--; } active_old_gen_number--; assert ((!settings.promotion) || (active_new_gen_number>0)); if (active_new_gen_number == (max_generation - 1)) { #ifdef FREE_USAGE_STATS if (settings.condemned_generation == max_generation) { // We need to do this before we skip the rest of the pinned plugs. generation* gen_2 = generation_of (max_generation); generation* gen_1 = generation_of (max_generation - 1); size_t total_num_pinned_free_spaces_left = 0; // We are about to allocate gen1, check to see how efficient fitting in gen2 pinned free spaces is. 
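                // The efficiency logged below is allocated_in_pinned_free / (allocated_in_pinned_free +
                // pinned_free_obj_space), i.e. the fraction of gen2's pinned free space that was actually
                // handed out to allocations rather than left as free objects.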
                for (int j = 0; j < NUM_GEN_POWER2; j++)
                {
                    dprintf (1, ("[h%d][#%Id]2^%d: current: %Id, S: 2: %Id, 1: %Id(%Id)",
                        heap_number, settings.gc_index,
                        (j + 10), gen_2->gen_current_pinned_free_spaces[j],
                        gen_2->gen_plugs[j], gen_1->gen_plugs[j],
                        (gen_2->gen_plugs[j] + gen_1->gen_plugs[j])));

                    total_num_pinned_free_spaces_left += gen_2->gen_current_pinned_free_spaces[j];
                }

                float pinned_free_list_efficiency = 0;
                size_t total_pinned_free_space = generation_allocated_in_pinned_free (gen_2) + generation_pinned_free_obj_space (gen_2);
                if (total_pinned_free_space != 0)
                {
                    pinned_free_list_efficiency = (float)(generation_allocated_in_pinned_free (gen_2)) / (float)total_pinned_free_space;
                }

                dprintf (1, ("[h%d] gen2 allocated %Id bytes with %Id bytes pinned free spaces (effi: %d%%), %Id (%Id) left",
                            heap_number,
                            generation_allocated_in_pinned_free (gen_2),
                            total_pinned_free_space,
                            (int)(pinned_free_list_efficiency * 100),
                            generation_pinned_free_obj_space (gen_2),
                            total_num_pinned_free_spaces_left));
            }
#endif //FREE_USAGE_STATS

            //Go past all of the pinned plugs for this generation.
            while (!pinned_plug_que_empty_p() &&
                   (!in_range_for_segment ((pinned_plug (oldest_pin())), ephemeral_heap_segment)))
            {
                size_t entry = deque_pinned_plug();
                mark* m = pinned_plug_of (entry);
                uint8_t* plug = pinned_plug (m);
                size_t len = pinned_len (m);

                // detect pinned block in different segment (later) than
                // allocation segment, skip those until the oldest pin is in the ephemeral seg.
                // adjust the allocation segment along the way (at the end it will
                // be the ephemeral segment).
                heap_segment* nseg = heap_segment_in_range (generation_allocation_segment (consing_gen));

                PREFIX_ASSUME(nseg != NULL);

                while (!((plug >= generation_allocation_pointer (consing_gen))&&
                         (plug < heap_segment_allocated (nseg))))
                {
                    //adjust the end of the segment to be the end of the plug
                    assert (generation_allocation_pointer (consing_gen)>= heap_segment_mem (nseg));
                    assert (generation_allocation_pointer (consing_gen)<= heap_segment_committed (nseg));

                    heap_segment_plan_allocated (nseg) =
                        generation_allocation_pointer (consing_gen);
                    //switch allocation segment
                    nseg = heap_segment_next_rw (nseg);
                    generation_allocation_segment (consing_gen) = nseg;
                    //reset the allocation pointer and limits
                    generation_allocation_pointer (consing_gen) = heap_segment_mem (nseg);
                }
                set_new_pin_info (m, generation_allocation_pointer (consing_gen));
                assert(pinned_len(m) == 0 || pinned_len(m) >= Align(min_obj_size));
                generation_allocation_pointer (consing_gen) = plug + len;
                generation_allocation_limit (consing_gen) =
                    generation_allocation_pointer (consing_gen);
            }
            allocate_in_condemned = TRUE;
            consing_gen = ensure_ephemeral_heap_segment (consing_gen);
        }

        if (active_new_gen_number != max_generation)
        {
            if (active_new_gen_number == (max_generation - 1))
            {
                maxgen_pinned_compact_before_advance = generation_pinned_allocation_compact_size (generation_of (max_generation));
                if (!demote_gen1_p)
                    advance_pins_for_demotion (consing_gen);
            }

            plan_generation_start (generation_of (active_new_gen_number), consing_gen, x);

            dprintf (2, ("process eph: allocated gen%d start at %Ix",
                active_new_gen_number,
                generation_plan_allocation_start (generation_of (active_new_gen_number))));

            if ((demotion_low == MAX_PTR) && !pinned_plug_que_empty_p())
            {
                uint8_t* pplug = pinned_plug (oldest_pin());
                if (object_gennum (pplug) > 0)
                {
                    demotion_low = pplug;
                    dprintf (3, ("process eph: dlow->%Ix", demotion_low));
                }
            }

            assert (generation_plan_allocation_start (generation_of (active_new_gen_number)));
        }

        goto retry;
    }
}
#endif
//!USE_REGIONS inline void gc_heap::seg_clear_mark_bits (heap_segment* seg) { uint8_t* o = heap_segment_mem (seg); while (o < heap_segment_allocated (seg)) { if (marked (o)) { clear_marked (o); } o = o + Align (size (o)); } } #ifdef FEATURE_BASICFREEZE void gc_heap::sweep_ro_segments (heap_segment* start_seg) { //go through all of the segment in range and reset the mark bit heap_segment* seg = start_seg; while (seg) { if (heap_segment_read_only_p (seg) && heap_segment_in_range_p (seg)) { #ifdef BACKGROUND_GC if (settings.concurrent) { seg_clear_mark_array_bits_soh (seg); } else { seg_clear_mark_bits (seg); } #else //BACKGROUND_GC seg_clear_mark_bits (seg); #endif //BACKGROUND_GC } seg = heap_segment_next (seg); } } #endif // FEATURE_BASICFREEZE #ifdef FEATURE_LOH_COMPACTION inline BOOL gc_heap::loh_pinned_plug_que_empty_p() { return (loh_pinned_queue_bos == loh_pinned_queue_tos); } void gc_heap::loh_set_allocator_next_pin() { if (!(loh_pinned_plug_que_empty_p())) { mark* oldest_entry = loh_oldest_pin(); uint8_t* plug = pinned_plug (oldest_entry); generation* gen = large_object_generation; if ((plug >= generation_allocation_pointer (gen)) && (plug < generation_allocation_limit (gen))) { generation_allocation_limit (gen) = pinned_plug (oldest_entry); } else assert (!((plug < generation_allocation_pointer (gen)) && (plug >= heap_segment_mem (generation_allocation_segment (gen))))); } } size_t gc_heap::loh_deque_pinned_plug () { size_t m = loh_pinned_queue_bos; loh_pinned_queue_bos++; return m; } inline mark* gc_heap::loh_pinned_plug_of (size_t bos) { return &loh_pinned_queue[bos]; } inline mark* gc_heap::loh_oldest_pin() { return loh_pinned_plug_of (loh_pinned_queue_bos); } // If we can't grow the queue, then don't compact. BOOL gc_heap::loh_enque_pinned_plug (uint8_t* plug, size_t len) { assert(len >= Align(min_obj_size, get_alignment_constant (FALSE))); if (loh_pinned_queue_length <= loh_pinned_queue_tos) { if (!grow_mark_stack (loh_pinned_queue, loh_pinned_queue_length, LOH_PIN_QUEUE_LENGTH)) { return FALSE; } } dprintf (3, (" P: %Ix(%Id)", plug, len)); mark& m = loh_pinned_queue[loh_pinned_queue_tos]; m.first = plug; m.len = len; loh_pinned_queue_tos++; loh_set_allocator_next_pin(); return TRUE; } inline BOOL gc_heap::loh_size_fit_p (size_t size, uint8_t* alloc_pointer, uint8_t* alloc_limit) { dprintf (1235, ("trying to fit %Id(%Id) between %Ix and %Ix (%Id)", size, (2* AlignQword (loh_padding_obj_size) + size), alloc_pointer, alloc_limit, (alloc_limit - alloc_pointer))); return ((alloc_pointer + 2* AlignQword (loh_padding_obj_size) + size) <= alloc_limit); } uint8_t* gc_heap::loh_allocate_in_condemned (size_t size) { generation* gen = large_object_generation; dprintf (1235, ("E: p:%Ix, l:%Ix, s: %Id", generation_allocation_pointer (gen), generation_allocation_limit (gen), size)); retry: { heap_segment* seg = generation_allocation_segment (gen); if (!(loh_size_fit_p (size, generation_allocation_pointer (gen), generation_allocation_limit (gen)))) { if ((!(loh_pinned_plug_que_empty_p()) && (generation_allocation_limit (gen) == pinned_plug (loh_oldest_pin())))) { mark* m = loh_pinned_plug_of (loh_deque_pinned_plug()); size_t len = pinned_len (m); uint8_t* plug = pinned_plug (m); dprintf (1235, ("AIC: %Ix->%Ix(%Id)", generation_allocation_pointer (gen), plug, plug - generation_allocation_pointer (gen))); pinned_len (m) = plug - generation_allocation_pointer (gen); generation_allocation_pointer (gen) = plug + len; generation_allocation_limit (gen) = heap_segment_plan_allocated (seg); 
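                // The call to loh_set_allocator_next_pin below re-caps the allocation limit at the
                // next pinned plug, if one now falls inside the refreshed allocation range.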
loh_set_allocator_next_pin(); dprintf (1235, ("s: p: %Ix, l: %Ix (%Id)", generation_allocation_pointer (gen), generation_allocation_limit (gen), (generation_allocation_limit (gen) - generation_allocation_pointer (gen)))); goto retry; } if (generation_allocation_limit (gen) != heap_segment_plan_allocated (seg)) { generation_allocation_limit (gen) = heap_segment_plan_allocated (seg); dprintf (1235, ("l->pa(%Ix)", generation_allocation_limit (gen))); } else { if (heap_segment_plan_allocated (seg) != heap_segment_committed (seg)) { heap_segment_plan_allocated (seg) = heap_segment_committed (seg); generation_allocation_limit (gen) = heap_segment_plan_allocated (seg); dprintf (1235, ("l->c(%Ix)", generation_allocation_limit (gen))); } else { if (loh_size_fit_p (size, generation_allocation_pointer (gen), heap_segment_reserved (seg)) && (grow_heap_segment (seg, (generation_allocation_pointer (gen) + size + 2* AlignQword (loh_padding_obj_size))))) { dprintf (1235, ("growing seg from %Ix to %Ix\n", heap_segment_committed (seg), (generation_allocation_pointer (gen) + size))); heap_segment_plan_allocated (seg) = heap_segment_committed (seg); generation_allocation_limit (gen) = heap_segment_plan_allocated (seg); dprintf (1235, ("g: p: %Ix, l: %Ix (%Id)", generation_allocation_pointer (gen), generation_allocation_limit (gen), (generation_allocation_limit (gen) - generation_allocation_pointer (gen)))); } else { heap_segment* next_seg = heap_segment_next (seg); assert (generation_allocation_pointer (gen)>= heap_segment_mem (seg)); // Verify that all pinned plugs for this segment are consumed if (!loh_pinned_plug_que_empty_p() && ((pinned_plug (loh_oldest_pin()) < heap_segment_allocated (seg)) && (pinned_plug (loh_oldest_pin()) >= generation_allocation_pointer (gen)))) { LOG((LF_GC, LL_INFO10, "remaining pinned plug %Ix while leaving segment on allocation", pinned_plug (loh_oldest_pin()))); dprintf (1, ("queue empty: %d", loh_pinned_plug_que_empty_p())); FATAL_GC_ERROR(); } assert (generation_allocation_pointer (gen)>= heap_segment_mem (seg)); assert (generation_allocation_pointer (gen)<= heap_segment_committed (seg)); heap_segment_plan_allocated (seg) = generation_allocation_pointer (gen); if (next_seg) { // for LOH do we want to try starting from the first LOH every time though? 
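                            // Move the allocation context to the next LOH segment and retry;
                            // if there is no next segment (the else branch below), compaction
                            // has run out of room and that is treated as a fatal error.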
generation_allocation_segment (gen) = next_seg; generation_allocation_pointer (gen) = heap_segment_mem (next_seg); generation_allocation_limit (gen) = generation_allocation_pointer (gen); dprintf (1235, ("n: p: %Ix, l: %Ix (%Id)", generation_allocation_pointer (gen), generation_allocation_limit (gen), (generation_allocation_limit (gen) - generation_allocation_pointer (gen)))); } else { dprintf (1, ("We ran out of space compacting, shouldn't happen")); FATAL_GC_ERROR(); } } } } loh_set_allocator_next_pin(); dprintf (1235, ("r: p: %Ix, l: %Ix (%Id)", generation_allocation_pointer (gen), generation_allocation_limit (gen), (generation_allocation_limit (gen) - generation_allocation_pointer (gen)))); goto retry; } } { assert (generation_allocation_pointer (gen)>= heap_segment_mem (generation_allocation_segment (gen))); uint8_t* result = generation_allocation_pointer (gen); size_t loh_pad = AlignQword (loh_padding_obj_size); generation_allocation_pointer (gen) += size + loh_pad; assert (generation_allocation_pointer (gen) <= generation_allocation_limit (gen)); dprintf (1235, ("p: %Ix, l: %Ix (%Id)", generation_allocation_pointer (gen), generation_allocation_limit (gen), (generation_allocation_limit (gen) - generation_allocation_pointer (gen)))); assert (result + loh_pad); return result + loh_pad; } } BOOL gc_heap::loh_compaction_requested() { // If hard limit is specified GC will automatically decide if LOH needs to be compacted. return (loh_compaction_always_p || (loh_compaction_mode != loh_compaction_default)); } inline void gc_heap::check_loh_compact_mode (BOOL all_heaps_compacted_p) { if (settings.loh_compaction && (loh_compaction_mode == loh_compaction_once)) { if (all_heaps_compacted_p) { // If the compaction mode says to compact once and we are going to compact LOH, // we need to revert it back to no compaction. loh_compaction_mode = loh_compaction_default; } } } BOOL gc_heap::plan_loh() { #ifdef FEATURE_EVENT_TRACE uint64_t start_time, end_time; if (informational_event_enabled_p) { memset (loh_compact_info, 0, (sizeof (etw_loh_compact_info) * get_num_heaps())); start_time = GetHighPrecisionTimeStamp(); } #endif //FEATURE_EVENT_TRACE if (!loh_pinned_queue) { loh_pinned_queue = new (nothrow) (mark [LOH_PIN_QUEUE_LENGTH]); if (!loh_pinned_queue) { dprintf (1, ("Cannot allocate the LOH pinned queue (%Id bytes), no compaction", LOH_PIN_QUEUE_LENGTH * sizeof (mark))); return FALSE; } loh_pinned_queue_length = LOH_PIN_QUEUE_LENGTH; } if (heap_number == 0) loh_pinned_queue_decay = LOH_PIN_DECAY; loh_pinned_queue_tos = 0; loh_pinned_queue_bos = 0; generation* gen = large_object_generation; heap_segment* start_seg = heap_segment_rw (generation_start_segment (gen)); PREFIX_ASSUME(start_seg != NULL); heap_segment* seg = start_seg; uint8_t* o = get_uoh_start_object (seg, gen); dprintf (1235, ("before GC LOH size: %Id, free list: %Id, free obj: %Id\n", generation_size (loh_generation), generation_free_list_space (gen), generation_free_obj_space (gen))); while (seg) { heap_segment_plan_allocated (seg) = heap_segment_mem (seg); seg = heap_segment_next (seg); } seg = start_seg; // We don't need to ever realloc gen3 start so don't touch it. 
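    // The walk below plans every marked LOH object: pinned objects are queued and keep
    // their address (relocation distance 0), unpinned objects get a compacted address from
    // loh_allocate_in_condemned, and the relocation distance is recorded with
    // loh_set_node_relocation_distance for the relocate and compact phases.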
heap_segment_plan_allocated (seg) = o; generation_allocation_pointer (gen) = o; generation_allocation_limit (gen) = generation_allocation_pointer (gen); generation_allocation_segment (gen) = start_seg; uint8_t* free_space_start = o; uint8_t* free_space_end = o; uint8_t* new_address = 0; while (1) { if (o >= heap_segment_allocated (seg)) { seg = heap_segment_next (seg); if (seg == 0) { break; } o = heap_segment_mem (seg); } if (marked (o)) { free_space_end = o; size_t size = AlignQword (size (o)); dprintf (1235, ("%Ix(%Id) M", o, size)); if (pinned (o)) { // We don't clear the pinned bit yet so we can check in // compact phase how big a free object we should allocate // in front of the pinned object. We use the reloc address // field to store this. if (!loh_enque_pinned_plug (o, size)) { return FALSE; } new_address = o; } else { new_address = loh_allocate_in_condemned (size); } loh_set_node_relocation_distance (o, (new_address - o)); dprintf (1235, ("lobj %Ix-%Ix -> %Ix-%Ix (%Id)", o, (o + size), new_address, (new_address + size), (new_address - o))); o = o + size; free_space_start = o; if (o < heap_segment_allocated (seg)) { assert (!marked (o)); } } else { while (o < heap_segment_allocated (seg) && !marked (o)) { dprintf (1235, ("%Ix(%Id) F (%d)", o, AlignQword (size (o)), ((method_table (o) == g_gc_pFreeObjectMethodTable) ? 1 : 0))); o = o + AlignQword (size (o)); } } } while (!loh_pinned_plug_que_empty_p()) { mark* m = loh_pinned_plug_of (loh_deque_pinned_plug()); size_t len = pinned_len (m); uint8_t* plug = pinned_plug (m); // detect pinned block in different segment (later) than // allocation segment heap_segment* nseg = heap_segment_rw (generation_allocation_segment (gen)); while ((plug < generation_allocation_pointer (gen)) || (plug >= heap_segment_allocated (nseg))) { assert ((plug < heap_segment_mem (nseg)) || (plug > heap_segment_reserved (nseg))); //adjust the end of the segment to be the end of the plug assert (generation_allocation_pointer (gen)>= heap_segment_mem (nseg)); assert (generation_allocation_pointer (gen)<= heap_segment_committed (nseg)); heap_segment_plan_allocated (nseg) = generation_allocation_pointer (gen); //switch allocation segment nseg = heap_segment_next_rw (nseg); generation_allocation_segment (gen) = nseg; //reset the allocation pointer and limits generation_allocation_pointer (gen) = heap_segment_mem (nseg); } dprintf (1235, ("SP: %Ix->%Ix(%Id)", generation_allocation_pointer (gen), plug, plug - generation_allocation_pointer (gen))); pinned_len (m) = plug - generation_allocation_pointer (gen); generation_allocation_pointer (gen) = plug + len; } heap_segment_plan_allocated (generation_allocation_segment (gen)) = generation_allocation_pointer (gen); generation_allocation_pointer (gen) = 0; generation_allocation_limit (gen) = 0; #ifdef FEATURE_EVENT_TRACE if (informational_event_enabled_p) { end_time = GetHighPrecisionTimeStamp(); loh_compact_info[heap_number].time_plan = limit_time_to_uint32 (end_time - start_time); } #endif //FEATURE_EVENT_TRACE return TRUE; } void gc_heap::compact_loh() { assert (loh_compaction_requested() || heap_hard_limit || conserve_mem_setting); #ifdef FEATURE_EVENT_TRACE uint64_t start_time, end_time; if (informational_event_enabled_p) { start_time = GetHighPrecisionTimeStamp(); } #endif //FEATURE_EVENT_TRACE generation* gen = large_object_generation; heap_segment* start_seg = heap_segment_rw (generation_start_segment (gen)); PREFIX_ASSUME(start_seg != NULL); heap_segment* seg = start_seg; heap_segment* prev_seg = 0; uint8_t* o = 
get_uoh_start_object (seg, gen); // We don't need to ever realloc gen3 start so don't touch it. uint8_t* free_space_start = o; uint8_t* free_space_end = o; generation_allocator (gen)->clear(); generation_free_list_space (gen) = 0; generation_free_obj_space (gen) = 0; loh_pinned_queue_bos = 0; while (1) { if (o >= heap_segment_allocated (seg)) { heap_segment* next_seg = heap_segment_next (seg); // REGIONS TODO: for regions we can get rid of the start_seg. Just need // to update start region accordingly. if ((heap_segment_plan_allocated (seg) == heap_segment_mem (seg)) && (seg != start_seg) && !heap_segment_read_only_p (seg)) { dprintf (3, ("Preparing empty large segment %Ix", (size_t)seg)); assert (prev_seg); heap_segment_next (prev_seg) = next_seg; heap_segment_next (seg) = freeable_uoh_segment; freeable_uoh_segment = seg; #ifdef USE_REGIONS update_start_tail_regions (gen, seg, prev_seg, next_seg); #endif //USE_REGIONS } else { if (!heap_segment_read_only_p (seg)) { // We grew the segment to accommodate allocations. if (heap_segment_plan_allocated (seg) > heap_segment_allocated (seg)) { if ((heap_segment_plan_allocated (seg) - plug_skew) > heap_segment_used (seg)) { heap_segment_used (seg) = heap_segment_plan_allocated (seg) - plug_skew; } } heap_segment_allocated (seg) = heap_segment_plan_allocated (seg); dprintf (3, ("Trimming seg to %Ix[", heap_segment_allocated (seg))); decommit_heap_segment_pages (seg, 0); dprintf (1236, ("CLOH: seg: %Ix, alloc: %Ix, used: %Ix, committed: %Ix", seg, heap_segment_allocated (seg), heap_segment_used (seg), heap_segment_committed (seg))); //heap_segment_used (seg) = heap_segment_allocated (seg) - plug_skew; dprintf (1236, ("CLOH: used is set to %Ix", heap_segment_used (seg))); } prev_seg = seg; } seg = next_seg; if (seg == 0) break; else { o = heap_segment_mem (seg); } } if (marked (o)) { free_space_end = o; size_t size = AlignQword (size (o)); size_t loh_pad; uint8_t* reloc = o; clear_marked (o); if (pinned (o)) { // We are relying on the fact the pinned objects are always looked at in the same order // in plan phase and in compact phase. 
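                // Because the pins were queued in the order plan_loh encountered them, the entry
                // dequeued here must describe the pinned object o we are standing on (asserted
                // below), and pinned_len holds the gap plan_loh left in front of it.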
mark* m = loh_pinned_plug_of (loh_deque_pinned_plug()); uint8_t* plug = pinned_plug (m); assert (plug == o); loh_pad = pinned_len (m); clear_pinned (o); } else { loh_pad = AlignQword (loh_padding_obj_size); reloc += loh_node_relocation_distance (o); gcmemcopy (reloc, o, size, TRUE); } thread_gap ((reloc - loh_pad), loh_pad, gen); o = o + size; free_space_start = o; if (o < heap_segment_allocated (seg)) { assert (!marked (o)); } } else { while (o < heap_segment_allocated (seg) && !marked (o)) { o = o + AlignQword (size (o)); } } } #ifdef FEATURE_EVENT_TRACE if (informational_event_enabled_p) { end_time = GetHighPrecisionTimeStamp(); loh_compact_info[heap_number].time_compact = limit_time_to_uint32 (end_time - start_time); } #endif //FEATURE_EVENT_TRACE assert (loh_pinned_plug_que_empty_p()); dprintf (1235, ("after GC LOH size: %Id, free list: %Id, free obj: %Id\n\n", generation_size (loh_generation), generation_free_list_space (gen), generation_free_obj_space (gen))); } #ifdef FEATURE_EVENT_TRACE inline void gc_heap::loh_reloc_survivor_helper (uint8_t** pval, size_t& total_refs, size_t& zero_refs) { uint8_t* val = *pval; if (!val) zero_refs++; total_refs++; reloc_survivor_helper (pval); } #endif //FEATURE_EVENT_TRACE void gc_heap::relocate_in_loh_compact() { generation* gen = large_object_generation; heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); uint8_t* o = get_uoh_start_object (seg, gen); #ifdef FEATURE_EVENT_TRACE size_t total_refs = 0; size_t zero_refs = 0; uint64_t start_time, end_time; if (informational_event_enabled_p) { start_time = GetHighPrecisionTimeStamp(); } #endif //FEATURE_EVENT_TRACE while (1) { if (o >= heap_segment_allocated (seg)) { seg = heap_segment_next (seg); if (seg == 0) { break; } o = heap_segment_mem (seg); } if (marked (o)) { size_t size = AlignQword (size (o)); check_class_object_demotion (o); if (contain_pointers (o)) { #ifdef FEATURE_EVENT_TRACE if (informational_event_enabled_p) { go_through_object_nostart (method_table (o), o, size(o), pval, { loh_reloc_survivor_helper (pval, total_refs, zero_refs); }); } else #endif //FEATURE_EVENT_TRACE { go_through_object_nostart (method_table (o), o, size(o), pval, { reloc_survivor_helper (pval); }); } } o = o + size; if (o < heap_segment_allocated (seg)) { assert (!marked (o)); } } else { while (o < heap_segment_allocated (seg) && !marked (o)) { o = o + AlignQword (size (o)); } } } #ifdef FEATURE_EVENT_TRACE if (informational_event_enabled_p) { end_time = GetHighPrecisionTimeStamp(); loh_compact_info[heap_number].time_relocate = limit_time_to_uint32 (end_time - start_time); loh_compact_info[heap_number].total_refs = total_refs; loh_compact_info[heap_number].zero_refs = zero_refs; } #endif //FEATURE_EVENT_TRACE dprintf (1235, ("after GC LOH size: %Id, free list: %Id, free obj: %Id\n\n", generation_size (loh_generation), generation_free_list_space (gen), generation_free_obj_space (gen))); } void gc_heap::walk_relocation_for_loh (void* profiling_context, record_surv_fn fn) { generation* gen = large_object_generation; heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); uint8_t* o = get_uoh_start_object (seg, gen); while (1) { if (o >= heap_segment_allocated (seg)) { seg = heap_segment_next (seg); if (seg == 0) { break; } o = heap_segment_mem (seg); } if (marked (o)) { size_t size = AlignQword (size (o)); ptrdiff_t reloc = loh_node_relocation_distance (o); STRESS_LOG_PLUG_MOVE(o, (o + size), -reloc); fn (o, (o + size), reloc, profiling_context, !!settings.compaction, false); o = o + 
size; if (o < heap_segment_allocated (seg)) { assert (!marked (o)); } } else { while (o < heap_segment_allocated (seg) && !marked (o)) { o = o + AlignQword (size (o)); } } } } BOOL gc_heap::loh_object_p (uint8_t* o) { #ifdef MULTIPLE_HEAPS gc_heap* hp = gc_heap::g_heaps [0]; int brick_entry = hp->brick_table[hp->brick_of (o)]; #else //MULTIPLE_HEAPS int brick_entry = brick_table[brick_of (o)]; #endif //MULTIPLE_HEAPS return (brick_entry == 0); } #endif //FEATURE_LOH_COMPACTION void gc_heap::convert_to_pinned_plug (BOOL& last_npinned_plug_p, BOOL& last_pinned_plug_p, BOOL& pinned_plug_p, size_t ps, size_t& artificial_pinned_size) { last_npinned_plug_p = FALSE; last_pinned_plug_p = TRUE; pinned_plug_p = TRUE; artificial_pinned_size = ps; } // Because we have the artificial pinning, we can't guarantee that pinned and npinned // plugs are always interleaved. void gc_heap::store_plug_gap_info (uint8_t* plug_start, uint8_t* plug_end, BOOL& last_npinned_plug_p, BOOL& last_pinned_plug_p, uint8_t*& last_pinned_plug, BOOL& pinned_plug_p, uint8_t* last_object_in_last_plug, BOOL& merge_with_last_pin_p, // this is only for verification purpose size_t last_plug_len) { UNREFERENCED_PARAMETER(last_plug_len); if (!last_npinned_plug_p && !last_pinned_plug_p) { //dprintf (3, ("last full plug end: %Ix, full plug start: %Ix", plug_end, plug_start)); dprintf (3, ("Free: %Ix", (plug_start - plug_end))); assert ((plug_start == plug_end) || ((size_t)(plug_start - plug_end) >= Align (min_obj_size))); set_gap_size (plug_start, plug_start - plug_end); } if (pinned (plug_start)) { BOOL save_pre_plug_info_p = FALSE; if (last_npinned_plug_p || last_pinned_plug_p) { //if (last_plug_len == Align (min_obj_size)) //{ // dprintf (3, ("debugging only - last npinned plug is min, check to see if it's correct")); // GCToOSInterface::DebugBreak(); //} save_pre_plug_info_p = TRUE; } pinned_plug_p = TRUE; last_npinned_plug_p = FALSE; if (last_pinned_plug_p) { dprintf (3, ("last plug %Ix was also pinned, should merge", last_pinned_plug)); merge_with_last_pin_p = TRUE; } else { last_pinned_plug_p = TRUE; last_pinned_plug = plug_start; enque_pinned_plug (last_pinned_plug, save_pre_plug_info_p, last_object_in_last_plug); if (save_pre_plug_info_p) { #ifdef DOUBLY_LINKED_FL if (last_object_in_last_plug == generation_last_free_list_allocated(generation_of(max_generation))) { saved_pinned_plug_index = mark_stack_tos; } #endif //DOUBLY_LINKED_FL set_gap_size (plug_start, sizeof (gap_reloc_pair)); } } } else { if (last_pinned_plug_p) { //if (Align (last_plug_len) < min_pre_pin_obj_size) //{ // dprintf (3, ("debugging only - last pinned plug is min, check to see if it's correct")); // GCToOSInterface::DebugBreak(); //} save_post_plug_info (last_pinned_plug, last_object_in_last_plug, plug_start); set_gap_size (plug_start, sizeof (gap_reloc_pair)); verify_pins_with_post_plug_info("after saving post plug info"); } last_npinned_plug_p = TRUE; last_pinned_plug_p = FALSE; } } void gc_heap::record_interesting_data_point (interesting_data_point idp) { #ifdef GC_CONFIG_DRIVEN (interesting_data_per_gc[idp])++; #else UNREFERENCED_PARAMETER(idp); #endif //GC_CONFIG_DRIVEN } #ifdef USE_REGIONS void gc_heap::skip_pins_in_alloc_region (generation* consing_gen, int plan_gen_num) { heap_segment* alloc_region = generation_allocation_segment (consing_gen); while (!pinned_plug_que_empty_p()) { uint8_t* oldest_plug = pinned_plug (oldest_pin()); if ((oldest_plug >= generation_allocation_pointer (consing_gen)) && (oldest_plug < heap_segment_allocated 
(alloc_region))) { mark* m = pinned_plug_of (deque_pinned_plug()); uint8_t* plug = pinned_plug (m); size_t len = pinned_len (m); set_new_pin_info (m, generation_allocation_pointer (consing_gen)); dprintf (REGIONS_LOG, ("pin %Ix b: %Ix->%Ix", plug, brick_of (plug), (size_t)(brick_table[brick_of (plug)]))); generation_allocation_pointer (consing_gen) = plug + len; } else { // Exit when we detect the first pin that's not on the alloc seg anymore. break; } } dprintf (REGIONS_LOG, ("finished with alloc region %Ix, (%s) plan gen -> %d", heap_segment_mem (alloc_region), (heap_segment_swept_in_plan (alloc_region) ? "SIP" : "non SIP"), (heap_segment_swept_in_plan (alloc_region) ? heap_segment_plan_gen_num (alloc_region) : plan_gen_num))); set_region_plan_gen_num_sip (alloc_region, plan_gen_num); heap_segment_plan_allocated (alloc_region) = generation_allocation_pointer (consing_gen); } void gc_heap::decide_on_demotion_pin_surv (heap_segment* region) { int new_gen_num = 0; if (settings.promotion) { // If this region doesn't have much pinned surv left, we demote it; otherwise the region // will be promoted like normal. size_t basic_region_size = (size_t)1 << min_segment_size_shr; if ((int)(((double)heap_segment_pinned_survived (region) * 100.0) / (double)basic_region_size) >= demotion_pinned_ratio_th) { new_gen_num = get_plan_gen_num (heap_segment_gen_num (region)); } } set_region_plan_gen_num_sip (region, new_gen_num); } // If the next plan gen number is different, since different generations cannot share the same // region, we need to get a new alloc region and skip all remaining pins in the alloc region if // any. void gc_heap::process_last_np_surv_region (generation* consing_gen, int current_plan_gen_num, int next_plan_gen_num) { heap_segment* alloc_region = generation_allocation_segment (consing_gen); //assert (in_range_for_segment (generation_allocation_pointer (consing_gen), alloc_region)); // I'm not using in_range_for_segment here because alloc pointer/limit can be exactly the same // as reserved. size_fit_p in allocate_in_condemned_generations can be used to fit the exact // size of a plug at the end of the segment which makes alloc pointer/limit both reserved // on exit of that method. uint8_t* consing_gen_alloc_ptr = generation_allocation_pointer (consing_gen); assert ((consing_gen_alloc_ptr >= heap_segment_mem (alloc_region)) && (consing_gen_alloc_ptr <= heap_segment_reserved (alloc_region))); dprintf (REGIONS_LOG, ("h%d next need to plan gen%d, consing alloc region: %Ix, ptr: %Ix(consing gen: %d)", heap_number, next_plan_gen_num, heap_segment_mem (alloc_region), generation_allocation_pointer (consing_gen), consing_gen->gen_num)); if (current_plan_gen_num != next_plan_gen_num) { // If we haven't needed to consume this alloc region at all, we can use it to allocate the new // gen. if (generation_allocation_pointer (consing_gen) == heap_segment_mem (alloc_region)) { dprintf (REGIONS_LOG, ("h%d alloc region %Ix unused, using it to plan %d", heap_number, heap_segment_mem (alloc_region), next_plan_gen_num)); return; } // skip all the pins in this region since we cannot use it to plan the next gen. 
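        // skip_pins_in_alloc_region consumes any pins still sitting in the current alloc
        // region; the code below then picks the region to plan the next generation into:
        // the next region in this generation, the start region of the next younger
        // generation, or a brand new region for gen0 (setting special_sweep_p if none
        // can be obtained).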
skip_pins_in_alloc_region (consing_gen, current_plan_gen_num); heap_segment* next_region = heap_segment_next (alloc_region); if (!next_region) { int gen_num = heap_segment_gen_num (alloc_region); if (gen_num > 0) { next_region = generation_start_segment (generation_of (gen_num - 1)); dprintf (REGIONS_LOG, ("h%d consing switching to next gen%d seg %Ix", heap_number, heap_segment_gen_num (next_region), heap_segment_mem (next_region))); } else { if (settings.promotion) { assert (next_plan_gen_num == 0); next_region = get_new_region (0); if (next_region) { dprintf (REGIONS_LOG, ("h%d getting a new region for gen0 plan start seg to %Ix", heap_number, heap_segment_mem (next_region))); } else { dprintf (REGIONS_LOG, ("h%d couldn't get a region to plan gen0, special sweep on", heap_number)); special_sweep_p = true; } } else { assert (!"ran out of regions for non promotion case??"); } } } else { dprintf (REGIONS_LOG, ("h%d consing switching to next seg %Ix in gen%d to alloc in", heap_number, heap_segment_mem (next_region), heap_segment_gen_num (next_region))); } if (next_region) { init_alloc_info (consing_gen, next_region); dprintf (REGIONS_LOG, ("h%d consing(%d) alloc seg: %Ix(%Ix, %Ix), ptr: %Ix, planning gen%d", heap_number, consing_gen->gen_num, heap_segment_mem (generation_allocation_segment (consing_gen)), heap_segment_allocated (generation_allocation_segment (consing_gen)), heap_segment_plan_allocated (generation_allocation_segment (consing_gen)), generation_allocation_pointer (consing_gen), next_plan_gen_num)); } else { assert (special_sweep_p); } } } void gc_heap::process_remaining_regions (int current_plan_gen_num, generation* consing_gen) { assert ((current_plan_gen_num == 0) || (!settings.promotion && (current_plan_gen_num == -1))); if (special_sweep_p) { assert (pinned_plug_que_empty_p()); } dprintf (REGIONS_LOG, ("h%d PRR: plan %d: consing alloc seg: %Ix, ptr: %Ix", heap_number, current_plan_gen_num, heap_segment_mem (generation_allocation_segment (consing_gen)), generation_allocation_pointer (consing_gen))); if (current_plan_gen_num == -1) { assert (!settings.promotion); current_plan_gen_num = 0; } while (!pinned_plug_que_empty_p()) { uint8_t* oldest_plug = pinned_plug (oldest_pin()); // detect pinned block in segments without pins heap_segment* nseg = heap_segment_rw (generation_allocation_segment (consing_gen)); dprintf (3, ("h%d oldest pin: %Ix, consing alloc %Ix, ptr %Ix, limit %Ix", heap_number, oldest_plug, heap_segment_mem (nseg), generation_allocation_pointer (consing_gen), generation_allocation_limit (consing_gen))); while ((oldest_plug < generation_allocation_pointer (consing_gen)) || (oldest_plug >= heap_segment_allocated (nseg))) { assert ((oldest_plug < heap_segment_mem (nseg)) || (oldest_plug > heap_segment_reserved (nseg))); assert (generation_allocation_pointer (consing_gen)>= heap_segment_mem (nseg)); assert (generation_allocation_pointer (consing_gen)<= heap_segment_committed (nseg)); dprintf (3, ("h%d PRR: in loop, seg %Ix pa %Ix -> alloc ptr %Ix, plan gen %d->%d", heap_number, heap_segment_mem (nseg), heap_segment_plan_allocated (nseg), generation_allocation_pointer (consing_gen), heap_segment_plan_gen_num (nseg), current_plan_gen_num)); if (!heap_segment_swept_in_plan (nseg)) { heap_segment_plan_allocated (nseg) = generation_allocation_pointer (consing_gen); } decide_on_demotion_pin_surv (nseg); heap_segment* next_seg = heap_segment_next_non_sip (nseg); if ((next_seg == 0) && (heap_segment_gen_num (nseg) > 0)) { next_seg = generation_start_segment (generation_of 
(heap_segment_gen_num (nseg) - 1)); dprintf (3, ("h%d PRR: switching to next gen%d start %Ix", heap_number, heap_segment_gen_num (next_seg), (size_t)next_seg)); } assert (next_seg != 0); nseg = next_seg; generation_allocation_segment (consing_gen) = nseg; generation_allocation_pointer (consing_gen) = heap_segment_mem (nseg); } mark* m = pinned_plug_of (deque_pinned_plug()); uint8_t* plug = pinned_plug (m); size_t len = pinned_len (m); set_new_pin_info (m, generation_allocation_pointer (consing_gen)); size_t free_size = pinned_len (m); update_planned_gen0_free_space (free_size, plug); dprintf (2, ("h%d plug %Ix-%Ix(%Id), free space before %Ix-%Ix(%Id)", heap_number, plug, (plug + len), len, generation_allocation_pointer (consing_gen), plug, free_size)); generation_allocation_pointer (consing_gen) = plug + len; generation_allocation_limit (consing_gen) = generation_allocation_pointer (consing_gen); } heap_segment* current_region = generation_allocation_segment (consing_gen); if (special_sweep_p) { assert (heap_segment_next_rw (current_region) == 0); return; } set_region_plan_gen_num_sip (current_region, current_plan_gen_num); if (!heap_segment_swept_in_plan (current_region)) { heap_segment_plan_allocated (current_region) = generation_allocation_pointer (consing_gen); dprintf (REGIONS_LOG, ("h%d setting alloc seg %Ix plan alloc to %Ix", heap_number, heap_segment_mem (current_region), heap_segment_plan_allocated (current_region))); } heap_segment* region_no_pins = heap_segment_next (current_region); int region_no_pins_gen_num = heap_segment_gen_num (current_region); do { region_no_pins = heap_segment_non_sip (region_no_pins); if (region_no_pins) { set_region_plan_gen_num (region_no_pins, current_plan_gen_num); heap_segment_plan_allocated (region_no_pins) = heap_segment_mem (region_no_pins); dprintf (REGIONS_LOG, ("h%d setting seg %Ix(no pins) plan gen to 0, plan alloc to %Ix", heap_number, heap_segment_mem (region_no_pins), heap_segment_plan_allocated (region_no_pins))); region_no_pins = heap_segment_next (region_no_pins); } else { if (region_no_pins_gen_num > 0) { region_no_pins_gen_num--; region_no_pins = generation_start_segment (generation_of (region_no_pins_gen_num)); } else break; } } while (region_no_pins); } void gc_heap::grow_mark_list_piece() { if (g_mark_list_piece_size < region_count) { delete[] g_mark_list_piece; // at least double the size size_t alloc_count = max ((g_mark_list_piece_size * 2), region_count); // we need two arrays with alloc_count entries per heap g_mark_list_piece = new (nothrow) uint8_t * *[alloc_count * 2 * get_num_heaps()]; if (g_mark_list_piece != nullptr) { g_mark_list_piece_size = alloc_count; } else { g_mark_list_piece_size = 0; } } } void gc_heap::save_current_survived() { if (!survived_per_region) return; size_t region_info_to_copy = region_count * sizeof (size_t); memcpy (old_card_survived_per_region, survived_per_region, region_info_to_copy); #ifdef _DEBUG for (size_t region_index = 0; region_index < region_count; region_index++) { if (survived_per_region[region_index] != 0) { dprintf (REGIONS_LOG, ("region#[%3d]: %Id", region_index, survived_per_region[region_index])); } } dprintf (REGIONS_LOG, ("global reported %Id", promoted_bytes (heap_number))); #endif //_DEBUG } void gc_heap::update_old_card_survived() { if (!survived_per_region) return; for (size_t region_index = 0; region_index < region_count; region_index++) { old_card_survived_per_region[region_index] = survived_per_region[region_index] - old_card_survived_per_region[region_index]; if 
(survived_per_region[region_index] != 0) { dprintf (REGIONS_LOG, ("region#[%3d]: %Id (card: %Id)", region_index, survived_per_region[region_index], old_card_survived_per_region[region_index])); } } } void gc_heap::update_planned_gen0_free_space (size_t free_size, uint8_t* plug) { gen0_pinned_free_space += free_size; if (!gen0_large_chunk_found) { gen0_large_chunk_found = (free_size >= END_SPACE_AFTER_GC_FL); if (gen0_large_chunk_found) { dprintf (3, ("h%d found large pin free space: %Id at %Ix", heap_number, free_size, plug)); } } } // REGIONS TODO: I wrote this in the same spirit as ephemeral_gen_fit_p but we really should // take committed into consideration instead of reserved. We could also avoid going through // the regions again and do this update in plan phase. void gc_heap::get_gen0_end_plan_space() { for (int gen_idx = settings.condemned_generation; gen_idx >= 0; gen_idx--) { generation* gen = generation_of (gen_idx); heap_segment* region = heap_segment_rw (generation_start_segment (gen)); while (region) { if (heap_segment_plan_gen_num (region) == 0) { size_t end_plan_space = heap_segment_reserved (region) - heap_segment_plan_allocated (region); if (!gen0_large_chunk_found) { gen0_large_chunk_found = (end_plan_space >= END_SPACE_AFTER_GC_FL); if (gen0_large_chunk_found) { dprintf (REGIONS_LOG, ("h%d found large end space: %Id in region %Ix", heap_number, end_plan_space, heap_segment_mem (region))); } } dprintf (REGIONS_LOG, ("h%d found end space: %Id in region %Ix, total %Id->%Id", heap_number, end_plan_space, heap_segment_mem (region), end_gen0_region_space, (end_gen0_region_space + end_plan_space))); end_gen0_region_space += end_plan_space; } region = heap_segment_next (region); } } } size_t gc_heap::get_gen0_end_space() { size_t end_space = 0; heap_segment* seg = generation_start_segment (generation_of (0)); while (seg) { // TODO - // This method can also be called concurrently by full GC notification but // there's no synchronization between checking for ephemeral_heap_segment and // getting alloc_allocated so for now we just always use heap_segment_allocated. //uint8_t* allocated = ((seg == ephemeral_heap_segment) ? 
// alloc_allocated : heap_segment_allocated (seg)); uint8_t* allocated = heap_segment_allocated (seg); end_space += heap_segment_reserved (seg) - allocated; dprintf (REGIONS_LOG, ("h%d gen0 seg %Ix, end %Ix-%Ix=%Ix, end_space->%Id", heap_number, heap_segment_mem (seg), heap_segment_reserved (seg), allocated, (heap_segment_reserved (seg) - allocated), end_space)); seg = heap_segment_next (seg); } return end_space; } #endif //USE_REGIONS inline uint8_t* gc_heap::find_next_marked (uint8_t* x, uint8_t* end, BOOL use_mark_list, uint8_t**& mark_list_next, uint8_t** mark_list_index) { if (use_mark_list) { uint8_t* old_x = x; while ((mark_list_next < mark_list_index) && (*mark_list_next <= x)) { mark_list_next++; } x = end; if ((mark_list_next < mark_list_index) #ifdef MULTIPLE_HEAPS && (*mark_list_next < end) //for multiple segments #endif //MULTIPLE_HEAPS ) x = *mark_list_next; #ifdef BACKGROUND_GC if (current_c_gc_state == c_gc_state_marking) { assert(gc_heap::background_running_p()); bgc_clear_batch_mark_array_bits (old_x, x); } #endif //BACKGROUND_GC } else { uint8_t* xl = x; #ifdef BACKGROUND_GC if (current_c_gc_state == c_gc_state_marking) { assert (gc_heap::background_running_p()); while ((xl < end) && !marked (xl)) { dprintf (4, ("-%Ix-", (size_t)xl)); assert ((size (xl) > 0)); background_object_marked (xl, TRUE); xl = xl + Align (size (xl)); Prefetch (xl); } } else #endif //BACKGROUND_GC { while ((xl < end) && !marked (xl)) { dprintf (4, ("-%Ix-", (size_t)xl)); assert ((size (xl) > 0)); xl = xl + Align (size (xl)); Prefetch (xl); } } assert (xl <= end); x = xl; } return x; } #ifdef FEATURE_EVENT_TRACE void gc_heap::init_bucket_info() { memset (bucket_info, 0, sizeof (bucket_info)); } void gc_heap::add_plug_in_condemned_info (generation* gen, size_t plug_size) { uint32_t bucket_index = generation_allocator (gen)->first_suitable_bucket (plug_size); (bucket_info[bucket_index].count)++; bucket_info[bucket_index].size += plug_size; } #endif //FEATURE_EVENT_TRACE #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable:21000) // Suppress PREFast warning about overly large function #endif //_PREFAST_ void gc_heap::plan_phase (int condemned_gen_number) { size_t old_gen2_allocated = 0; size_t old_gen2_size = 0; if (condemned_gen_number == (max_generation - 1)) { old_gen2_allocated = generation_free_list_allocated (generation_of (max_generation)); old_gen2_size = generation_size (max_generation); } assert (settings.concurrent == FALSE); dprintf (2,(ThreadStressLog::gcStartPlanMsg(), heap_number, condemned_gen_number, settings.promotion ? 
1 : 0)); generation* condemned_gen1 = generation_of (condemned_gen_number); BOOL use_mark_list = FALSE; #ifdef GC_CONFIG_DRIVEN dprintf (3, ("total number of marked objects: %Id (%Id)", (mark_list_index - &mark_list[0]), (mark_list_end - &mark_list[0]))); if (mark_list_index >= (mark_list_end + 1)) { mark_list_index = mark_list_end + 1; #ifndef MULTIPLE_HEAPS // in Server GC, we check for mark list overflow in sort_mark_list mark_list_overflow = true; #endif } #else //GC_CONFIG_DRIVEN dprintf (3, ("mark_list length: %Id", (mark_list_index - &mark_list[0]))); #endif //GC_CONFIG_DRIVEN if ((condemned_gen_number < max_generation) && (mark_list_index <= mark_list_end)) { #ifndef MULTIPLE_HEAPS #ifdef USE_VXSORT do_vxsort (mark_list, mark_list_index - mark_list, slow, shigh); #else //USE_VXSORT _sort (&mark_list[0], mark_list_index - 1, 0); #endif //USE_VXSORT dprintf (3, ("using mark list at GC #%Id", (size_t)settings.gc_index)); //verify_qsort_array (&mark_list[0], mark_list_index-1); #endif //!MULTIPLE_HEAPS use_mark_list = TRUE; get_gc_data_per_heap()->set_mechanism_bit(gc_mark_list_bit); } else { dprintf (3, ("mark_list not used")); } #ifdef FEATURE_BASICFREEZE #ifdef USE_REGIONS assert (!ro_segments_in_range); #else //USE_REGIONS if ((generation_start_segment (condemned_gen1) != ephemeral_heap_segment) && ro_segments_in_range) { sweep_ro_segments (generation_start_segment (condemned_gen1)); } #endif //USE_REGIONS #endif // FEATURE_BASICFREEZE #ifndef MULTIPLE_HEAPS int condemned_gen_index = get_stop_generation_index (condemned_gen_number); for (; condemned_gen_index <= condemned_gen_number; condemned_gen_index++) { generation* current_gen = generation_of (condemned_gen_index); if (shigh != (uint8_t*)0) { heap_segment* seg = heap_segment_rw (generation_start_segment (current_gen)); PREFIX_ASSUME(seg != NULL); heap_segment* fseg = seg; do { if (in_range_for_segment (slow, seg)) { uint8_t* start_unmarked = 0; #ifdef USE_REGIONS start_unmarked = heap_segment_mem (seg); #else //USE_REGIONS if (seg == fseg) { uint8_t* o = generation_allocation_start (current_gen); o += get_soh_start_obj_len (o); if (slow > o) { start_unmarked = o; assert ((slow - o) >= (int)Align (min_obj_size)); } } else { assert (condemned_gen_number == max_generation); start_unmarked = heap_segment_mem (seg); } #endif //USE_REGIONS if (start_unmarked) { size_t unmarked_size = slow - start_unmarked; if (unmarked_size > 0) { #ifdef BACKGROUND_GC if (current_c_gc_state == c_gc_state_marking) { bgc_clear_batch_mark_array_bits (start_unmarked, slow); } #endif //BACKGROUND_GC make_unused_array (start_unmarked, unmarked_size); } } } if (in_range_for_segment (shigh, seg)) { #ifdef BACKGROUND_GC if (current_c_gc_state == c_gc_state_marking) { bgc_clear_batch_mark_array_bits ((shigh + Align (size (shigh))), heap_segment_allocated (seg)); } #endif //BACKGROUND_GC heap_segment_saved_allocated (seg) = heap_segment_allocated (seg); heap_segment_allocated (seg) = shigh + Align (size (shigh)); } // test if the segment is in the range of [slow, shigh] if (!((heap_segment_reserved (seg) >= slow) && (heap_segment_mem (seg) <= shigh))) { heap_segment_saved_allocated (seg) = heap_segment_allocated (seg); // shorten it to minimum heap_segment_allocated (seg) = heap_segment_mem (seg); } seg = heap_segment_next_rw (seg); } while (seg); } else { heap_segment* seg = heap_segment_rw (generation_start_segment (current_gen)); PREFIX_ASSUME(seg != NULL); heap_segment* sseg = seg; do { uint8_t* start_unmarked = heap_segment_mem (seg); #ifndef USE_REGIONS 
// shorten it to minimum if (seg == sseg) { // no survivors make all generations look empty uint8_t* o = generation_allocation_start (current_gen); o += get_soh_start_obj_len (o); start_unmarked = o; } #endif //!USE_REGIONS #ifdef BACKGROUND_GC if (current_c_gc_state == c_gc_state_marking) { bgc_clear_batch_mark_array_bits (start_unmarked, heap_segment_allocated (seg)); } #endif //BACKGROUND_GC heap_segment_saved_allocated (seg) = heap_segment_allocated (seg); heap_segment_allocated (seg) = start_unmarked; seg = heap_segment_next_rw (seg); } while (seg); } } #endif //MULTIPLE_HEAPS heap_segment* seg1 = heap_segment_rw (generation_start_segment (condemned_gen1)); PREFIX_ASSUME(seg1 != NULL); uint8_t* end = heap_segment_allocated (seg1); uint8_t* first_condemned_address = get_soh_start_object (seg1, condemned_gen1); uint8_t* x = first_condemned_address; #ifdef USE_REGIONS memset (regions_per_gen, 0, sizeof (regions_per_gen)); memset (sip_maxgen_regions_per_gen, 0, sizeof (sip_maxgen_regions_per_gen)); memset (reserved_free_regions_sip, 0, sizeof (reserved_free_regions_sip)); int pinned_survived_region = 0; uint8_t** mark_list_index = nullptr; uint8_t** mark_list_next = nullptr; if (use_mark_list) mark_list_next = get_region_mark_list (x, end, &mark_list_index); #else // USE_REGIONS assert (!marked (x)); uint8_t** mark_list_next = &mark_list[0]; #endif //USE_REGIONS uint8_t* plug_end = x; uint8_t* tree = 0; size_t sequence_number = 0; uint8_t* last_node = 0; size_t current_brick = brick_of (x); BOOL allocate_in_condemned = ((condemned_gen_number == max_generation)|| (settings.promotion == FALSE)); int active_old_gen_number = condemned_gen_number; int active_new_gen_number = (allocate_in_condemned ? condemned_gen_number: (1 + condemned_gen_number)); generation* older_gen = 0; generation* consing_gen = condemned_gen1; alloc_list r_free_list [MAX_SOH_BUCKET_COUNT]; size_t r_free_list_space = 0; size_t r_free_obj_space = 0; size_t r_older_gen_free_list_allocated = 0; size_t r_older_gen_condemned_allocated = 0; size_t r_older_gen_end_seg_allocated = 0; uint8_t* r_allocation_pointer = 0; uint8_t* r_allocation_limit = 0; uint8_t* r_allocation_start_region = 0; heap_segment* r_allocation_segment = 0; #ifdef FREE_USAGE_STATS size_t r_older_gen_free_space[NUM_GEN_POWER2]; #endif //FREE_USAGE_STATS if ((condemned_gen_number < max_generation)) { older_gen = generation_of (min (max_generation, 1 + condemned_gen_number)); generation_allocator (older_gen)->copy_to_alloc_list (r_free_list); r_free_list_space = generation_free_list_space (older_gen); r_free_obj_space = generation_free_obj_space (older_gen); #ifdef FREE_USAGE_STATS memcpy (r_older_gen_free_space, older_gen->gen_free_spaces, sizeof (r_older_gen_free_space)); #endif //FREE_USAGE_STATS generation_allocate_end_seg_p (older_gen) = FALSE; #ifdef DOUBLY_LINKED_FL if (older_gen->gen_num == max_generation) { generation_set_bgc_mark_bit_p (older_gen) = FALSE; generation_last_free_list_allocated (older_gen) = 0; } #endif //DOUBLY_LINKED_FL r_older_gen_free_list_allocated = generation_free_list_allocated (older_gen); r_older_gen_condemned_allocated = generation_condemned_allocated (older_gen); r_older_gen_end_seg_allocated = generation_end_seg_allocated (older_gen); r_allocation_limit = generation_allocation_limit (older_gen); r_allocation_pointer = generation_allocation_pointer (older_gen); r_allocation_start_region = generation_allocation_context_start_region (older_gen); r_allocation_segment = generation_allocation_segment (older_gen); #ifdef 
USE_REGIONS if (older_gen->gen_num == max_generation) { check_seg_gen_num (r_allocation_segment); } #endif //USE_REGIONS heap_segment* start_seg = heap_segment_rw (generation_start_segment (older_gen)); PREFIX_ASSUME(start_seg != NULL); #ifdef USE_REGIONS heap_segment* skip_seg = 0; assert (generation_allocation_pointer (older_gen) == 0); assert (generation_allocation_limit (older_gen) == 0); #else //USE_REGIONS heap_segment* skip_seg = ephemeral_heap_segment; if (start_seg != ephemeral_heap_segment) { assert (condemned_gen_number == (max_generation - 1)); } #endif //USE_REGIONS if (start_seg != skip_seg) { while (start_seg && (start_seg != skip_seg)) { assert (heap_segment_allocated (start_seg) >= heap_segment_mem (start_seg)); assert (heap_segment_allocated (start_seg) <= heap_segment_reserved (start_seg)); heap_segment_plan_allocated (start_seg) = heap_segment_allocated (start_seg); start_seg = heap_segment_next_rw (start_seg); } } } //reset all of the segment's plan_allocated { int condemned_gen_index1 = get_stop_generation_index (condemned_gen_number); for (; condemned_gen_index1 <= condemned_gen_number; condemned_gen_index1++) { generation* current_gen = generation_of (condemned_gen_index1); heap_segment* seg2 = heap_segment_rw (generation_start_segment (current_gen)); PREFIX_ASSUME(seg2 != NULL); while (seg2) { #ifdef USE_REGIONS regions_per_gen[condemned_gen_index1]++; dprintf (REGIONS_LOG, ("h%d gen%d %Ix-%Ix", heap_number, condemned_gen_index1, heap_segment_mem (seg2), heap_segment_allocated (seg2))); #endif //USE_REGIONS heap_segment_plan_allocated (seg2) = heap_segment_mem (seg2); seg2 = heap_segment_next_rw (seg2); } } } int condemned_gn = condemned_gen_number; int bottom_gen = 0; init_free_and_plug(); while (condemned_gn >= bottom_gen) { generation* condemned_gen2 = generation_of (condemned_gn); generation_allocator (condemned_gen2)->clear(); generation_free_list_space (condemned_gen2) = 0; generation_free_obj_space (condemned_gen2) = 0; generation_allocation_size (condemned_gen2) = 0; generation_condemned_allocated (condemned_gen2) = 0; generation_sweep_allocated (condemned_gen2) = 0; generation_pinned_allocated (condemned_gen2) = 0; generation_free_list_allocated(condemned_gen2) = 0; generation_end_seg_allocated (condemned_gen2) = 0; generation_pinned_allocation_sweep_size (condemned_gen2) = 0; generation_pinned_allocation_compact_size (condemned_gen2) = 0; #ifdef FREE_USAGE_STATS generation_pinned_free_obj_space (condemned_gen2) = 0; generation_allocated_in_pinned_free (condemned_gen2) = 0; generation_allocated_since_last_pin (condemned_gen2) = 0; #endif //FREE_USAGE_STATS #ifndef USE_REGIONS generation_plan_allocation_start (condemned_gen2) = 0; #endif //!USE_REGIONS generation_allocation_segment (condemned_gen2) = heap_segment_rw (generation_start_segment (condemned_gen2)); PREFIX_ASSUME(generation_allocation_segment(condemned_gen2) != NULL); #ifdef USE_REGIONS generation_allocation_pointer (condemned_gen2) = heap_segment_mem (generation_allocation_segment (condemned_gen2)); #else //USE_REGIONS if (generation_start_segment (condemned_gen2) != ephemeral_heap_segment) { generation_allocation_pointer (condemned_gen2) = heap_segment_mem (generation_allocation_segment (condemned_gen2)); } else { generation_allocation_pointer (condemned_gen2) = generation_allocation_start (condemned_gen2); } #endif //USE_REGIONS generation_allocation_limit (condemned_gen2) = generation_allocation_pointer (condemned_gen2); generation_allocation_context_start_region (condemned_gen2) = 
generation_allocation_pointer (condemned_gen2); condemned_gn--; } BOOL allocate_first_generation_start = FALSE; if (allocate_in_condemned) { allocate_first_generation_start = TRUE; } dprintf(3,( " From %Ix to %Ix", (size_t)x, (size_t)end)); #ifdef USE_REGIONS if (should_sweep_in_plan (seg1)) { sweep_region_in_plan (seg1, use_mark_list, mark_list_next, mark_list_index); x = end; } #else demotion_low = MAX_PTR; demotion_high = heap_segment_allocated (ephemeral_heap_segment); #endif //!USE_REGIONS // If we are doing a gen1 only because of cards, it means we should not demote any pinned plugs // from gen1. They should get promoted to gen2. demote_gen1_p = !(settings.promotion && (settings.condemned_generation == (max_generation - 1)) && gen_to_condemn_reasons.is_only_condition (gen_low_card_p)); total_ephemeral_size = 0; print_free_and_plug ("BP"); #ifndef USE_REGIONS for (int gen_idx = 0; gen_idx <= max_generation; gen_idx++) { generation* temp_gen = generation_of (gen_idx); dprintf (2, ("gen%d start %Ix, plan start %Ix", gen_idx, generation_allocation_start (temp_gen), generation_plan_allocation_start (temp_gen))); } #endif //!USE_REGIONS #ifdef FEATURE_EVENT_TRACE // When verbose level is enabled we want to record some info about gen2 FL usage during gen1 GCs. // We record the bucket info for the largest FL items and plugs that we have to allocate in condemned. bool record_fl_info_p = (EVENT_ENABLED (GCFitBucketInfo) && (condemned_gen_number == (max_generation - 1))); size_t recorded_fl_info_size = 0; if (record_fl_info_p) init_bucket_info(); bool fire_pinned_plug_events_p = EVENT_ENABLED(PinPlugAtGCTime); #endif //FEATURE_EVENT_TRACE size_t last_plug_len = 0; #ifdef DOUBLY_LINKED_FL gen2_removed_no_undo = 0; saved_pinned_plug_index = INVALID_SAVED_PINNED_PLUG_INDEX; #endif //DOUBLY_LINKED_FL while (1) { if (x >= end) { if (!use_mark_list) { assert (x == end); } #ifdef USE_REGIONS if (heap_segment_swept_in_plan (seg1)) { assert (heap_segment_gen_num (seg1) == active_old_gen_number); dynamic_data* dd_active_old = dynamic_data_of (active_old_gen_number); dd_survived_size (dd_active_old) += heap_segment_survived (seg1); dprintf (REGIONS_LOG, ("region %Ix-%Ix SIP", heap_segment_mem (seg1), heap_segment_allocated (seg1))); } else #endif //USE_REGIONS { assert (heap_segment_allocated (seg1) == end); heap_segment_saved_allocated (seg1) = heap_segment_allocated (seg1); heap_segment_allocated (seg1) = plug_end; current_brick = update_brick_table (tree, current_brick, x, plug_end); dprintf (REGIONS_LOG, ("region %Ix-%Ix(%Ix) non SIP", heap_segment_mem (seg1), heap_segment_allocated (seg1), heap_segment_plan_allocated (seg1))); dprintf (3, ("end of seg: new tree, sequence# 0")); sequence_number = 0; tree = 0; } #ifdef USE_REGIONS heap_segment_pinned_survived (seg1) = pinned_survived_region; dprintf (REGIONS_LOG, ("h%d setting seg %Ix pin surv: %Ix", heap_number, heap_segment_mem (seg1), pinned_survived_region)); pinned_survived_region = 0; if (heap_segment_mem (seg1) == heap_segment_allocated (seg1)) { num_regions_freed_in_sweep++; } #endif //USE_REGIONS if (heap_segment_next_rw (seg1)) { seg1 = heap_segment_next_rw (seg1); end = heap_segment_allocated (seg1); plug_end = x = heap_segment_mem (seg1); current_brick = brick_of (x); #ifdef USE_REGIONS if (use_mark_list) mark_list_next = get_region_mark_list (x, end, &mark_list_index); if (should_sweep_in_plan (seg1)) { sweep_region_in_plan (seg1, use_mark_list, mark_list_next, mark_list_index); x = end; } #endif //USE_REGIONS dprintf(3,( " From %Ix to 
%Ix", (size_t)x, (size_t)end)); continue; } else { #ifdef USE_REGIONS // We have a few task here when we ran out of regions to go through for the // active_old_gen_number - // // + decide on which pins to skip // + set the planned gen for the regions we process here // + set the consing gen's alloc ptr/limit // + decide on the new active_old_gen_number (which is just the current one - 1) // + decide on the new active_new_gen_number (which depends on settings.promotion) // // Important differences between process_last_np_surv_region and process_ephemeral_boundaries // - it's guaranteed we would ask to allocate gen1 start for promotion and gen0 // start for non promotion case. // - consing_gen is never changed. In fact we really don't need consing_gen, we just // need the alloc ptr/limit pair and the alloc seg. // TODO : should just get rid of consing_gen. // These make things more regular and easier to keep track of. // // Also I'm doing everything here instead of having to have separate code to go // through the left over pins after the main loop in plan phase. int saved_active_new_gen_number = active_new_gen_number; BOOL saved_allocate_in_condemned = allocate_in_condemned; dprintf (REGIONS_LOG, ("h%d switching to look at next gen - current active old %d, new %d, alloc_in_condemned: %d", heap_number, active_old_gen_number, active_new_gen_number, allocate_in_condemned)); if (active_old_gen_number <= (settings.promotion ? (max_generation - 1) : max_generation)) { dprintf (REGIONS_LOG, ("h%d active old: %d, new: %d->%d, allocate_in_condemned %d->1", heap_number, active_old_gen_number, active_new_gen_number, (active_new_gen_number - 1), allocate_in_condemned)); active_new_gen_number--; allocate_in_condemned = TRUE; } if (active_new_gen_number >= 0) { process_last_np_surv_region (consing_gen, saved_active_new_gen_number, active_new_gen_number); } if (active_old_gen_number == 0) { // We need to process the pins on the remaining regions if any. process_remaining_regions (active_new_gen_number, consing_gen); break; } else { active_old_gen_number--; seg1 = heap_segment_rw (generation_start_segment (generation_of (active_old_gen_number))); end = heap_segment_allocated (seg1); plug_end = x = heap_segment_mem (seg1); current_brick = brick_of (x); if (use_mark_list) mark_list_next = get_region_mark_list (x, end, &mark_list_index); if (should_sweep_in_plan (seg1)) { sweep_region_in_plan (seg1, use_mark_list, mark_list_next, mark_list_index); x = end; } dprintf (REGIONS_LOG,("h%d switching to gen%d start region %Ix, %Ix-%Ix", heap_number, active_old_gen_number, heap_segment_mem (seg1), x, end)); continue; } #else //USE_REGIONS break; #endif //USE_REGIONS } } BOOL last_npinned_plug_p = FALSE; BOOL last_pinned_plug_p = FALSE; // last_pinned_plug is the beginning of the last pinned plug. If we merge a plug into a pinned // plug we do not change the value of last_pinned_plug. This happens with artificially pinned plugs - // it can be merged with a previous pinned plug and a pinned plug after it can be merged with it. 
uint8_t* last_pinned_plug = 0; size_t num_pinned_plugs_in_plug = 0; uint8_t* last_object_in_plug = 0; while ((x < end) && marked (x)) { uint8_t* plug_start = x; uint8_t* saved_plug_end = plug_end; BOOL pinned_plug_p = FALSE; BOOL npin_before_pin_p = FALSE; BOOL saved_last_npinned_plug_p = last_npinned_plug_p; uint8_t* saved_last_object_in_plug = last_object_in_plug; BOOL merge_with_last_pin_p = FALSE; size_t added_pinning_size = 0; size_t artificial_pinned_size = 0; store_plug_gap_info (plug_start, plug_end, last_npinned_plug_p, last_pinned_plug_p, last_pinned_plug, pinned_plug_p, last_object_in_plug, merge_with_last_pin_p, last_plug_len); #ifdef FEATURE_STRUCTALIGN int requiredAlignment = ((CObjectHeader*)plug_start)->GetRequiredAlignment(); size_t alignmentOffset = OBJECT_ALIGNMENT_OFFSET; #endif // FEATURE_STRUCTALIGN { uint8_t* xl = x; while ((xl < end) && marked (xl) && (pinned (xl) == pinned_plug_p)) { assert (xl < end); if (pinned(xl)) { clear_pinned (xl); } #ifdef FEATURE_STRUCTALIGN else { int obj_requiredAlignment = ((CObjectHeader*)xl)->GetRequiredAlignment(); if (obj_requiredAlignment > requiredAlignment) { requiredAlignment = obj_requiredAlignment; alignmentOffset = xl - plug_start + OBJECT_ALIGNMENT_OFFSET; } } #endif // FEATURE_STRUCTALIGN clear_marked (xl); dprintf(4, ("+%Ix+", (size_t)xl)); assert ((size (xl) > 0)); assert ((size (xl) <= loh_size_threshold)); last_object_in_plug = xl; xl = xl + Align (size (xl)); Prefetch (xl); } BOOL next_object_marked_p = ((xl < end) && marked (xl)); if (pinned_plug_p) { // If it is pinned we need to extend to the next marked object as we can't use part of // a pinned object to make the artificial gap (unless the last 3 ptr sized words are all // references but for now I am just using the next non pinned object for that). if (next_object_marked_p) { clear_marked (xl); last_object_in_plug = xl; size_t extra_size = Align (size (xl)); xl = xl + extra_size; added_pinning_size = extra_size; } } else { if (next_object_marked_p) npin_before_pin_p = TRUE; } assert (xl <= end); x = xl; } dprintf (3, ( "%Ix[", (size_t)plug_start)); plug_end = x; size_t ps = plug_end - plug_start; last_plug_len = ps; dprintf (3, ( "%Ix[(%Ix)", (size_t)x, ps)); uint8_t* new_address = 0; if (!pinned_plug_p) { if (allocate_in_condemned && (settings.condemned_generation == max_generation) && (ps > OS_PAGE_SIZE)) { ptrdiff_t reloc = plug_start - generation_allocation_pointer (consing_gen); //reloc should >=0 except when we relocate //across segments and the dest seg is higher then the src if ((ps > (8*OS_PAGE_SIZE)) && (reloc > 0) && ((size_t)reloc < (ps/16))) { dprintf (3, ("Pinning %Ix; reloc would have been: %Ix", (size_t)plug_start, reloc)); // The last plug couldn't have been a npinned plug or it would have // included this plug. 
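                        // (Illustrative note) The guard above is a cost/benefit heuristic: for a
                        // plug larger than 8 OS pages whose relocation distance is positive but
                        // smaller than 1/16th of its size, the space reclaimed by sliding it down
                        // is small compared to the cost of copying it, so the plug is pinned
                        // artificially instead, i.e. roughly:
                        //
                        //   if (ps > 8 * OS_PAGE_SIZE && reloc > 0 && (size_t)reloc < ps / 16)
                        //       keep the plug where it is (convert_to_pinned_plug below);
                        //
                        // Because this plug was not absorbed into a preceding non-pinned plug,
                        // the assert that follows must hold.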
assert (!saved_last_npinned_plug_p); if (last_pinned_plug) { dprintf (3, ("artificially pinned plug merged with last pinned plug")); merge_with_last_pin_p = TRUE; } else { enque_pinned_plug (plug_start, FALSE, 0); last_pinned_plug = plug_start; } convert_to_pinned_plug (last_npinned_plug_p, last_pinned_plug_p, pinned_plug_p, ps, artificial_pinned_size); } } } #ifndef USE_REGIONS if (allocate_first_generation_start) { allocate_first_generation_start = FALSE; plan_generation_start (condemned_gen1, consing_gen, plug_start); assert (generation_plan_allocation_start (condemned_gen1)); } if (seg1 == ephemeral_heap_segment) { process_ephemeral_boundaries (plug_start, active_new_gen_number, active_old_gen_number, consing_gen, allocate_in_condemned); } #endif //!USE_REGIONS dprintf (3, ("adding %Id to gen%d surv", ps, active_old_gen_number)); dynamic_data* dd_active_old = dynamic_data_of (active_old_gen_number); dd_survived_size (dd_active_old) += ps; BOOL convert_to_pinned_p = FALSE; BOOL allocated_in_older_p = FALSE; if (!pinned_plug_p) { #if defined (RESPECT_LARGE_ALIGNMENT) || defined (FEATURE_STRUCTALIGN) dd_num_npinned_plugs (dd_active_old)++; #endif //RESPECT_LARGE_ALIGNMENT || FEATURE_STRUCTALIGN add_gen_plug (active_old_gen_number, ps); if (allocate_in_condemned) { verify_pins_with_post_plug_info("before aic"); new_address = allocate_in_condemned_generations (consing_gen, ps, active_old_gen_number, #ifdef SHORT_PLUGS &convert_to_pinned_p, (npin_before_pin_p ? plug_end : 0), seg1, #endif //SHORT_PLUGS plug_start REQD_ALIGN_AND_OFFSET_ARG); verify_pins_with_post_plug_info("after aic"); } else { new_address = allocate_in_older_generation (older_gen, ps, active_old_gen_number, plug_start REQD_ALIGN_AND_OFFSET_ARG); if (new_address != 0) { allocated_in_older_p = TRUE; if (settings.condemned_generation == (max_generation - 1)) { dprintf (3, (" NA: %Ix-%Ix -> %Ix, %Ix (%Ix)", plug_start, plug_end, (size_t)new_address, (size_t)new_address + (plug_end - plug_start), (size_t)(plug_end - plug_start))); } } else { if (generation_allocator(older_gen)->discard_if_no_fit_p()) { allocate_in_condemned = TRUE; } new_address = allocate_in_condemned_generations (consing_gen, ps, active_old_gen_number, #ifdef SHORT_PLUGS &convert_to_pinned_p, (npin_before_pin_p ? plug_end : 0), seg1, #endif //SHORT_PLUGS plug_start REQD_ALIGN_AND_OFFSET_ARG); } } #ifdef FEATURE_EVENT_TRACE if (record_fl_info_p && !allocated_in_older_p) { add_plug_in_condemned_info (older_gen, ps); recorded_fl_info_size += ps; } #endif //FEATURE_EVENT_TRACE if (convert_to_pinned_p) { assert (last_npinned_plug_p != FALSE); assert (last_pinned_plug_p == FALSE); convert_to_pinned_plug (last_npinned_plug_p, last_pinned_plug_p, pinned_plug_p, ps, artificial_pinned_size); enque_pinned_plug (plug_start, FALSE, 0); last_pinned_plug = plug_start; } else { if (!new_address) { //verify that we are at then end of the ephemeral segment assert (generation_allocation_segment (consing_gen) == ephemeral_heap_segment); //verify that we are near the end assert ((generation_allocation_pointer (consing_gen) + Align (ps)) < heap_segment_allocated (ephemeral_heap_segment)); assert ((generation_allocation_pointer (consing_gen) + Align (ps)) > (heap_segment_allocated (ephemeral_heap_segment) + Align (min_obj_size))); } else { dprintf (3, (ThreadStressLog::gcPlanPlugMsg(), (size_t)(node_gap_size (plug_start)), plug_start, plug_end, (size_t)new_address, (size_t)(plug_start - new_address), (size_t)new_address + ps, ps, (is_plug_padded (plug_start) ? 
1 : 0), x, (allocated_in_older_p ? "O" : "C"))); #ifdef SHORT_PLUGS if (is_plug_padded (plug_start)) { dprintf (3, ("%Ix was padded", plug_start)); dd_padding_size (dd_active_old) += Align (min_obj_size); } #endif //SHORT_PLUGS } } } if (pinned_plug_p) { #ifdef FEATURE_EVENT_TRACE if (fire_pinned_plug_events_p) { FIRE_EVENT(PinPlugAtGCTime, plug_start, plug_end, (merge_with_last_pin_p ? 0 : (uint8_t*)node_gap_size (plug_start))); } #endif //FEATURE_EVENT_TRACE if (merge_with_last_pin_p) { merge_with_last_pinned_plug (last_pinned_plug, ps); } else { assert (last_pinned_plug == plug_start); set_pinned_info (plug_start, ps, consing_gen); } new_address = plug_start; dprintf (3, (ThreadStressLog::gcPlanPinnedPlugMsg(), (size_t)(node_gap_size (plug_start)), (size_t)plug_start, (size_t)plug_end, ps, (merge_with_last_pin_p ? 1 : 0))); dprintf (3, ("adding %Id to gen%d pinned surv", plug_end - plug_start, active_old_gen_number)); size_t pinned_plug_size = plug_end - plug_start; #ifdef USE_REGIONS pinned_survived_region += (int)pinned_plug_size; #endif //USE_REGIONS dd_pinned_survived_size (dd_active_old) += pinned_plug_size; dd_added_pinned_size (dd_active_old) += added_pinning_size; dd_artificial_pinned_survived_size (dd_active_old) += artificial_pinned_size; if (!demote_gen1_p && (active_old_gen_number == (max_generation - 1))) { last_gen1_pin_end = plug_end; } } #ifdef _DEBUG // detect forward allocation in the same segment assert (!((new_address > plug_start) && (new_address < heap_segment_reserved (seg1)))); #endif //_DEBUG if (!merge_with_last_pin_p) { if (current_brick != brick_of (plug_start)) { current_brick = update_brick_table (tree, current_brick, plug_start, saved_plug_end); sequence_number = 0; tree = 0; } set_node_relocation_distance (plug_start, (new_address - plug_start)); if (last_node && (node_relocation_distance (last_node) == (node_relocation_distance (plug_start) + (ptrdiff_t)node_gap_size (plug_start)))) { //dprintf(3,( " Lb")); dprintf (3, ("%Ix Lb", plug_start)); set_node_left (plug_start); } if (0 == sequence_number) { dprintf (2, ("sn: 0, tree is set to %Ix", plug_start)); tree = plug_start; } verify_pins_with_post_plug_info("before insert node"); tree = insert_node (plug_start, ++sequence_number, tree, last_node); dprintf (3, ("tree is %Ix (b: %Ix) after insert_node(lc: %Ix, rc: %Ix)", tree, brick_of (tree), (tree + node_left_child (tree)), (tree + node_right_child (tree)))); last_node = plug_start; #ifdef _DEBUG // If we detect if the last plug is pinned plug right before us, we should save this gap info if (!pinned_plug_p) { if (mark_stack_tos > 0) { mark& m = mark_stack_array[mark_stack_tos - 1]; if (m.has_post_plug_info()) { uint8_t* post_plug_info_start = m.saved_post_plug_info_start; size_t* current_plug_gap_start = (size_t*)(plug_start - sizeof (plug_and_gap)); if ((uint8_t*)current_plug_gap_start == post_plug_info_start) { dprintf (3, ("Ginfo: %Ix, %Ix, %Ix", *current_plug_gap_start, *(current_plug_gap_start + 1), *(current_plug_gap_start + 2))); memcpy (&(m.saved_post_plug_debug), current_plug_gap_start, sizeof (gap_reloc_pair)); } } } } #endif //_DEBUG verify_pins_with_post_plug_info("after insert node"); } } if (num_pinned_plugs_in_plug > 1) { dprintf (3, ("more than %Id pinned plugs in this plug", num_pinned_plugs_in_plug)); } x = find_next_marked (x, end, use_mark_list, mark_list_next, mark_list_index); } #ifndef USE_REGIONS while (!pinned_plug_que_empty_p()) { if (settings.promotion) { uint8_t* pplug = pinned_plug (oldest_pin()); if (in_range_for_segment 
(pplug, ephemeral_heap_segment)) { consing_gen = ensure_ephemeral_heap_segment (consing_gen); //allocate all of the generation gaps while (active_new_gen_number > 0) { active_new_gen_number--; if (active_new_gen_number == (max_generation - 1)) { maxgen_pinned_compact_before_advance = generation_pinned_allocation_compact_size (generation_of (max_generation)); if (!demote_gen1_p) advance_pins_for_demotion (consing_gen); } generation* gen = generation_of (active_new_gen_number); plan_generation_start (gen, consing_gen, 0); if (demotion_low == MAX_PTR) { demotion_low = pplug; dprintf (3, ("end plan: dlow->%Ix", demotion_low)); } dprintf (2, ("(%d)gen%d plan start: %Ix", heap_number, active_new_gen_number, (size_t)generation_plan_allocation_start (gen))); assert (generation_plan_allocation_start (gen)); } } } if (pinned_plug_que_empty_p()) break; size_t entry = deque_pinned_plug(); mark* m = pinned_plug_of (entry); uint8_t* plug = pinned_plug (m); size_t len = pinned_len (m); // detect pinned block in different segment (later) than // allocation segment heap_segment* nseg = heap_segment_rw (generation_allocation_segment (consing_gen)); while ((plug < generation_allocation_pointer (consing_gen)) || (plug >= heap_segment_allocated (nseg))) { assert ((plug < heap_segment_mem (nseg)) || (plug > heap_segment_reserved (nseg))); //adjust the end of the segment to be the end of the plug assert (generation_allocation_pointer (consing_gen)>= heap_segment_mem (nseg)); assert (generation_allocation_pointer (consing_gen)<= heap_segment_committed (nseg)); heap_segment_plan_allocated (nseg) = generation_allocation_pointer (consing_gen); //switch allocation segment nseg = heap_segment_next_rw (nseg); generation_allocation_segment (consing_gen) = nseg; //reset the allocation pointer and limits generation_allocation_pointer (consing_gen) = heap_segment_mem (nseg); } set_new_pin_info (m, generation_allocation_pointer (consing_gen)); dprintf (2, ("pin %Ix b: %Ix->%Ix", plug, brick_of (plug), (size_t)(brick_table[brick_of (plug)]))); generation_allocation_pointer (consing_gen) = plug + len; generation_allocation_limit (consing_gen) = generation_allocation_pointer (consing_gen); //Add the size of the pinned plug to the right pinned allocations //find out which gen this pinned plug came from int frgn = object_gennum (plug); if ((frgn != (int)max_generation) && settings.promotion) { generation_pinned_allocation_sweep_size ((generation_of (frgn +1))) += len; } } plan_generation_starts (consing_gen); #endif //!USE_REGIONS descr_generations ("AP"); print_free_and_plug ("AP"); { #ifdef SIMPLE_DPRINTF for (int gen_idx = 0; gen_idx <= max_generation; gen_idx++) { generation* temp_gen = generation_of (gen_idx); dynamic_data* temp_dd = dynamic_data_of (gen_idx); int added_pinning_ratio = 0; int artificial_pinned_ratio = 0; if (dd_pinned_survived_size (temp_dd) != 0) { added_pinning_ratio = (int)((float)dd_added_pinned_size (temp_dd) * 100 / (float)dd_pinned_survived_size (temp_dd)); artificial_pinned_ratio = (int)((float)dd_artificial_pinned_survived_size (temp_dd) * 100 / (float)dd_pinned_survived_size (temp_dd)); } size_t padding_size = #ifdef SHORT_PLUGS dd_padding_size (temp_dd); #else 0; #endif //SHORT_PLUGS dprintf (1, ("gen%d: NON PIN alloc: %Id, pin com: %Id, sweep: %Id, surv: %Id, pinsurv: %Id(%d%% added, %d%% art), np surv: %Id, pad: %Id", gen_idx, generation_allocation_size (temp_gen), generation_pinned_allocation_compact_size (temp_gen), generation_pinned_allocation_sweep_size (temp_gen), dd_survived_size 
(temp_dd), dd_pinned_survived_size (temp_dd), added_pinning_ratio, artificial_pinned_ratio, (dd_survived_size (temp_dd) - dd_pinned_survived_size (temp_dd)), padding_size)); #ifndef USE_REGIONS dprintf (1, ("gen%d: %Ix, %Ix(%Id)", gen_idx, generation_allocation_start (temp_gen), generation_plan_allocation_start (temp_gen), (size_t)(generation_plan_allocation_start (temp_gen) - generation_allocation_start (temp_gen)))); #endif //USE_REGIONS } #endif //SIMPLE_DPRINTF } if (settings.condemned_generation == (max_generation - 1 )) { generation* older_gen = generation_of (settings.condemned_generation + 1); size_t rejected_free_space = generation_free_obj_space (older_gen) - r_free_obj_space; size_t free_list_allocated = generation_free_list_allocated (older_gen) - r_older_gen_free_list_allocated; size_t end_seg_allocated = generation_end_seg_allocated (older_gen) - r_older_gen_end_seg_allocated; size_t condemned_allocated = generation_condemned_allocated (older_gen) - r_older_gen_condemned_allocated; size_t growth = end_seg_allocated + condemned_allocated; if (growth > 0) { dprintf (1, ("gen2 grew %Id (end seg alloc: %Id, condemned alloc: %Id", growth, end_seg_allocated, condemned_allocated)); maxgen_size_inc_p = true; } else { dprintf (2, ("gen2 didn't grow (end seg alloc: %Id, , condemned alloc: %Id, gen1 c alloc: %Id", end_seg_allocated, condemned_allocated, generation_condemned_allocated (generation_of (max_generation - 1)))); } dprintf (1, ("older gen's free alloc: %Id->%Id, seg alloc: %Id->%Id, condemned alloc: %Id->%Id", r_older_gen_free_list_allocated, generation_free_list_allocated (older_gen), r_older_gen_end_seg_allocated, generation_end_seg_allocated (older_gen), r_older_gen_condemned_allocated, generation_condemned_allocated (older_gen))); dprintf (1, ("this GC did %Id free list alloc(%Id bytes free space rejected)", free_list_allocated, rejected_free_space)); maxgen_size_increase* maxgen_size_info = &(get_gc_data_per_heap()->maxgen_size_info); maxgen_size_info->free_list_allocated = free_list_allocated; maxgen_size_info->free_list_rejected = rejected_free_space; maxgen_size_info->end_seg_allocated = end_seg_allocated; maxgen_size_info->condemned_allocated = condemned_allocated; maxgen_size_info->pinned_allocated = maxgen_pinned_compact_before_advance; maxgen_size_info->pinned_allocated_advance = generation_pinned_allocation_compact_size (generation_of (max_generation)) - maxgen_pinned_compact_before_advance; #ifdef FREE_USAGE_STATS int free_list_efficiency = 0; if ((free_list_allocated + rejected_free_space) != 0) free_list_efficiency = (int)(((float) (free_list_allocated) / (float)(free_list_allocated + rejected_free_space)) * (float)100); int running_free_list_efficiency = (int)(generation_allocator_efficiency(older_gen)*100); dprintf (1, ("gen%d free list alloc effi: %d%%, current effi: %d%%", older_gen->gen_num, free_list_efficiency, running_free_list_efficiency)); dprintf (1, ("gen2 free list change")); for (int j = 0; j < NUM_GEN_POWER2; j++) { dprintf (1, ("[h%d][#%Id]: 2^%d: F: %Id->%Id(%Id), P: %Id", heap_number, settings.gc_index, (j + 10), r_older_gen_free_space[j], older_gen->gen_free_spaces[j], (ptrdiff_t)(r_older_gen_free_space[j] - older_gen->gen_free_spaces[j]), (generation_of(max_generation - 1))->gen_plugs[j])); } #endif //FREE_USAGE_STATS } size_t fragmentation = generation_fragmentation (generation_of (condemned_gen_number), consing_gen, heap_segment_allocated (ephemeral_heap_segment)); dprintf (2,("Fragmentation: %Id", fragmentation)); dprintf (2,("---- End of 
Plan phase ----")); // We may update write barrier code. We assume here EE has been suspended if we are on a GC thread. assert(IsGCInProgress()); BOOL should_expand = FALSE; BOOL should_compact= FALSE; ephemeral_promotion = FALSE; #ifdef HOST_64BIT if ((!settings.concurrent) && #ifdef USE_REGIONS !special_sweep_p && #endif //USE_REGIONS !provisional_mode_triggered && ((condemned_gen_number < max_generation) && ((settings.gen0_reduction_count > 0) || (settings.entry_memory_load >= 95)))) { dprintf (GTC_LOG, ("gen0 reduction count is %d, condemning %d, mem load %d", settings.gen0_reduction_count, condemned_gen_number, settings.entry_memory_load)); should_compact = TRUE; get_gc_data_per_heap()->set_mechanism (gc_heap_compact, ((settings.gen0_reduction_count > 0) ? compact_fragmented_gen0 : compact_high_mem_load)); #ifndef USE_REGIONS if ((condemned_gen_number >= (max_generation - 1)) && dt_low_ephemeral_space_p (tuning_deciding_expansion)) { dprintf (GTC_LOG, ("Not enough space for all ephemeral generations with compaction")); should_expand = TRUE; } #endif //!USE_REGIONS } else #endif // HOST_64BIT { should_compact = decide_on_compacting (condemned_gen_number, fragmentation, should_expand); } #ifdef FEATURE_LOH_COMPACTION loh_compacted_p = FALSE; #endif //FEATURE_LOH_COMPACTION if (condemned_gen_number == max_generation) { #ifdef FEATURE_LOH_COMPACTION if (settings.loh_compaction) { if (plan_loh()) { should_compact = TRUE; get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_loh_forced); loh_compacted_p = TRUE; } } else { if ((heap_number == 0) && (loh_pinned_queue)) { loh_pinned_queue_decay--; if (!loh_pinned_queue_decay) { delete loh_pinned_queue; loh_pinned_queue = 0; } } } if (!loh_compacted_p) #endif //FEATURE_LOH_COMPACTION { GCToEEInterface::DiagWalkUOHSurvivors(__this, loh_generation); sweep_uoh_objects (loh_generation); } GCToEEInterface::DiagWalkUOHSurvivors(__this, poh_generation); sweep_uoh_objects (poh_generation); } else { settings.loh_compaction = FALSE; } #ifdef MULTIPLE_HEAPS new_heap_segment = NULL; if (should_compact && should_expand) gc_policy = policy_expand; else if (should_compact) gc_policy = policy_compact; else gc_policy = policy_sweep; //vote for result of should_compact dprintf (3, ("Joining for compaction decision")); gc_t_join.join(this, gc_join_decide_on_compaction); if (gc_t_join.joined()) { //safe place to delete large heap segments if (condemned_gen_number == max_generation) { for (int i = 0; i < n_heaps; i++) { g_heaps [i]->rearrange_uoh_segments (); } } if (maxgen_size_inc_p && provisional_mode_triggered #ifdef BACKGROUND_GC && !is_bgc_in_progress() #endif //BACKGROUND_GC ) { pm_trigger_full_gc = true; dprintf (GTC_LOG, ("in PM: maxgen size inc, doing a sweeping gen1 and trigger NGC2")); } else { #ifndef USE_REGIONS settings.demotion = FALSE; #endif //!USE_REGIONS int pol_max = policy_sweep; #ifdef GC_CONFIG_DRIVEN BOOL is_compaction_mandatory = FALSE; #endif //GC_CONFIG_DRIVEN int i; for (i = 0; i < n_heaps; i++) { if (pol_max < g_heaps[i]->gc_policy) pol_max = policy_compact; #ifndef USE_REGIONS // set the demotion flag is any of the heap has demotion if (g_heaps[i]->demotion_high >= g_heaps[i]->demotion_low) { (g_heaps[i]->get_gc_data_per_heap())->set_mechanism_bit (gc_demotion_bit); settings.demotion = TRUE; } #endif //!USE_REGIONS #ifdef GC_CONFIG_DRIVEN if (!is_compaction_mandatory) { int compact_reason = (g_heaps[i]->get_gc_data_per_heap())->get_mechanism (gc_heap_compact); if (compact_reason >= 0) { if 
(gc_heap_compact_reason_mandatory_p[compact_reason]) is_compaction_mandatory = TRUE; } } #endif //GC_CONFIG_DRIVEN } #ifdef GC_CONFIG_DRIVEN if (!is_compaction_mandatory) { // If compaction is not mandatory we can feel free to change it to a sweeping GC. // Note that we may want to change this to only checking every so often instead of every single GC. if (should_do_sweeping_gc (pol_max >= policy_compact)) { pol_max = policy_sweep; } else { if (pol_max == policy_sweep) pol_max = policy_compact; } } #endif //GC_CONFIG_DRIVEN for (i = 0; i < n_heaps; i++) { if (pol_max > g_heaps[i]->gc_policy) g_heaps[i]->gc_policy = pol_max; #ifndef USE_REGIONS //get the segment while we are serialized if (g_heaps[i]->gc_policy == policy_expand) { g_heaps[i]->new_heap_segment = g_heaps[i]->soh_get_segment_to_expand(); if (!g_heaps[i]->new_heap_segment) { set_expand_in_full_gc (condemned_gen_number); //we are out of memory, cancel the expansion g_heaps[i]->gc_policy = policy_compact; } } #endif //!USE_REGIONS } BOOL is_full_compacting_gc = FALSE; if ((gc_policy >= policy_compact) && (condemned_gen_number == max_generation)) { full_gc_counts[gc_type_compacting]++; is_full_compacting_gc = TRUE; } for (i = 0; i < n_heaps; i++) { //copy the card and brick tables if (g_gc_card_table!= g_heaps[i]->card_table) { g_heaps[i]->copy_brick_card_table(); } if (is_full_compacting_gc) { g_heaps[i]->loh_alloc_since_cg = 0; } } } #ifdef FEATURE_EVENT_TRACE if (informational_event_enabled_p) { gc_time_info[time_sweep] = GetHighPrecisionTimeStamp(); gc_time_info[time_plan] = gc_time_info[time_sweep] - gc_time_info[time_plan]; } #endif //FEATURE_EVENT_TRACE dprintf(3, ("Starting all gc threads after compaction decision")); gc_t_join.restart(); } should_compact = (gc_policy >= policy_compact); should_expand = (gc_policy >= policy_expand); #else //MULTIPLE_HEAPS //safe place to delete large heap segments if (condemned_gen_number == max_generation) { rearrange_uoh_segments (); } if (maxgen_size_inc_p && provisional_mode_triggered #ifdef BACKGROUND_GC && !is_bgc_in_progress() #endif //BACKGROUND_GC ) { pm_trigger_full_gc = true; dprintf (GTC_LOG, ("in PM: maxgen size inc, doing a sweeping gen1 and trigger NGC2")); } else { #ifndef USE_REGIONS // for regions it was already set when we set plan_gen_num for regions. settings.demotion = ((demotion_high >= demotion_low) ? 
TRUE : FALSE); if (settings.demotion) get_gc_data_per_heap()->set_mechanism_bit (gc_demotion_bit); #endif //!USE_REGIONS #ifdef GC_CONFIG_DRIVEN BOOL is_compaction_mandatory = FALSE; int compact_reason = get_gc_data_per_heap()->get_mechanism (gc_heap_compact); if (compact_reason >= 0) is_compaction_mandatory = gc_heap_compact_reason_mandatory_p[compact_reason]; if (!is_compaction_mandatory) { if (should_do_sweeping_gc (should_compact)) should_compact = FALSE; else should_compact = TRUE; } #endif //GC_CONFIG_DRIVEN if (should_compact && (condemned_gen_number == max_generation)) { full_gc_counts[gc_type_compacting]++; loh_alloc_since_cg = 0; } } #ifdef FEATURE_EVENT_TRACE if (informational_event_enabled_p) { gc_time_info[time_sweep] = GetHighPrecisionTimeStamp(); gc_time_info[time_plan] = gc_time_info[time_sweep] - gc_time_info[time_plan]; } #endif //FEATURE_EVENT_TRACE #endif //MULTIPLE_HEAPS if (!pm_trigger_full_gc && pm_stress_on && provisional_mode_triggered) { if ((settings.condemned_generation == (max_generation - 1)) && ((settings.gc_index % 5) == 0) #ifdef BACKGROUND_GC && !is_bgc_in_progress() #endif //BACKGROUND_GC ) { pm_trigger_full_gc = true; } } if (settings.condemned_generation == (max_generation - 1)) { if (provisional_mode_triggered) { if (should_expand) { should_expand = FALSE; dprintf (GTC_LOG, ("h%d in PM cannot expand", heap_number)); } } if (pm_trigger_full_gc) { should_compact = FALSE; dprintf (GTC_LOG, ("h%d PM doing sweeping", heap_number)); } } if (should_compact) { dprintf (2,( "**** Doing Compacting GC ****")); #if defined(USE_REGIONS) && defined(BACKGROUND_GC) if (should_update_end_mark_size()) { background_soh_size_end_mark += generation_end_seg_allocated (older_gen) - r_older_gen_end_seg_allocated; } #endif //USE_REGIONS && BACKGROUND_GC #ifndef USE_REGIONS if (should_expand) { #ifndef MULTIPLE_HEAPS heap_segment* new_heap_segment = soh_get_segment_to_expand(); #endif //!MULTIPLE_HEAPS if (new_heap_segment) { consing_gen = expand_heap(condemned_gen_number, consing_gen, new_heap_segment); } // If we couldn't get a new segment, or we were able to // reserve one but no space to commit, we couldn't // expand heap. if (ephemeral_heap_segment != new_heap_segment) { set_expand_in_full_gc (condemned_gen_number); should_expand = FALSE; } } #endif //!USE_REGIONS generation_allocation_limit (condemned_gen1) = generation_allocation_pointer (condemned_gen1); if ((condemned_gen_number < max_generation)) { generation_allocator (older_gen)->commit_alloc_list_changes(); // Fix the allocation area of the older generation fix_older_allocation_area (older_gen); #ifdef FEATURE_EVENT_TRACE if (record_fl_info_p) { // For plugs allocated in condemned we kept track of each one but only fire the // event for buckets with non zero items. 
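            // Illustrative sketch (comment only): the loop below compacts the sparse per-bucket
            // counters into a dense prefix of bucket_info[] so the ETW payload only carries
            // buckets that actually recorded plugs, keeping each bucket's original index:
            //
            //   dense = 0;
            //   for (b = 0; b < NUM_GEN2_ALIST; b++)
            //       if (bucket_info[b].count != 0)
            //           bucket_info[dense++] = { b, bucket_info[b].count, bucket_info[b].size };
            //
            // A single GCFitBucketInfo event is then fired with that dense prefix.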
uint16_t non_zero_buckets = 0; for (uint16_t bucket_index = 0; bucket_index < NUM_GEN2_ALIST; bucket_index++) { if (bucket_info[bucket_index].count != 0) { if (bucket_index != non_zero_buckets) { bucket_info[non_zero_buckets].set (bucket_index, bucket_info[bucket_index].count, bucket_info[bucket_index].size); } else { bucket_info[bucket_index].index = bucket_index; } non_zero_buckets++; } } if (non_zero_buckets) { FIRE_EVENT(GCFitBucketInfo, (uint16_t)etw_bucket_kind::plugs_in_condemned, recorded_fl_info_size, non_zero_buckets, (uint32_t)(sizeof (etw_bucket_info)), (void *)bucket_info); init_bucket_info(); } // We want to get an idea of the sizes of free items in the top 25% of the free list // for gen2 (to be accurate - we stop as soon as the size we count exceeds 25%. This // is just so that if we have a really big free item we will still count that one). // The idea is we want to see if they all in a few big ones or many smaller ones? // To limit the amount of time we spend counting, we stop till we have counted the // top percentage, or exceeded max_etw_item_count items. size_t max_size_to_count = generation_free_list_space (older_gen) / 4; non_zero_buckets = generation_allocator (older_gen)->count_largest_items (bucket_info, max_size_to_count, max_etw_item_count, &recorded_fl_info_size); if (non_zero_buckets) { FIRE_EVENT(GCFitBucketInfo, (uint16_t)etw_bucket_kind::largest_fl_items, recorded_fl_info_size, non_zero_buckets, (uint32_t)(sizeof (etw_bucket_info)), (void *)bucket_info); } } #endif //FEATURE_EVENT_TRACE } #ifndef USE_REGIONS assert (generation_allocation_segment (consing_gen) == ephemeral_heap_segment); #endif //!USE_REGIONS GCToEEInterface::DiagWalkSurvivors(__this, true); relocate_phase (condemned_gen_number, first_condemned_address); compact_phase (condemned_gen_number, first_condemned_address, (!settings.demotion && settings.promotion)); fix_generation_bounds (condemned_gen_number, consing_gen); assert (generation_allocation_limit (youngest_generation) == generation_allocation_pointer (youngest_generation)); #ifndef USE_REGIONS if (condemned_gen_number >= (max_generation -1)) { #ifdef MULTIPLE_HEAPS // this needs be serialized just because we have one // segment_standby_list/seg_table for all heaps. We should make it at least // so that when hoarding is not on we don't need this join because // decommitting memory can take a long time. 
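        // The join below follows the usual server-GC pattern used throughout this file
        // (illustrative, comment only):
        //
        //   gc_t_join.join(this, gc_join_rearrange_segs_compaction);
        //   if (gc_t_join.joined())        // true only for the last heap thread to arrive
        //   {
        //       ... serialized work (here: rearranging segments on every heap) ...
        //       gc_t_join.restart();       // release the other heap threads
        //   }
        //
        // so segment deletion is performed by one thread while the others wait at the join.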
//must serialize on deleting segments gc_t_join.join(this, gc_join_rearrange_segs_compaction); if (gc_t_join.joined()) #endif //MULTIPLE_HEAPS { #ifdef FEATURE_EVENT_TRACE if (informational_event_enabled_p) { uint64_t current_time = GetHighPrecisionTimeStamp(); gc_time_info[time_compact] = current_time - gc_time_info[time_compact]; } #endif //FEATURE_EVENT_TRACE #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { g_heaps[i]->rearrange_heap_segments(TRUE); } #else //MULTIPLE_HEAPS rearrange_heap_segments(TRUE); #endif //MULTIPLE_HEAPS #ifdef MULTIPLE_HEAPS gc_t_join.restart(); #endif //MULTIPLE_HEAPS } if (should_expand) { //fix the start_segment for the ephemeral generations for (int i = 0; i < max_generation; i++) { generation* gen = generation_of (i); generation_start_segment (gen) = ephemeral_heap_segment; generation_allocation_segment (gen) = ephemeral_heap_segment; } } } #endif //!USE_REGIONS { #ifdef MULTIPLE_HEAPS dprintf(3, ("Joining after end of compaction")); gc_t_join.join(this, gc_join_adjust_handle_age_compact); if (gc_t_join.joined()) { #endif //MULTIPLE_HEAPS #ifdef FEATURE_EVENT_TRACE if (informational_event_enabled_p && (condemned_gen_number < (max_generation -1))) { uint64_t current_time = GetHighPrecisionTimeStamp(); gc_time_info[time_compact] = current_time - gc_time_info[time_compact]; } #endif //FEATURE_EVENT_TRACE #ifdef MULTIPLE_HEAPS //join all threads to make sure they are synchronized dprintf(3, ("Restarting after Promotion granted")); gc_t_join.restart(); } #endif //MULTIPLE_HEAPS #ifdef FEATURE_PREMORTEM_FINALIZATION finalize_queue->UpdatePromotedGenerations (condemned_gen_number, (!settings.demotion && settings.promotion)); #endif // FEATURE_PREMORTEM_FINALIZATION ScanContext sc; sc.thread_number = heap_number; sc.promotion = FALSE; sc.concurrent = FALSE; // new generations bounds are set can call this guy if (settings.promotion && !settings.demotion) { dprintf (2, ("Promoting EE roots for gen %d", condemned_gen_number)); GCScan::GcPromotionsGranted(condemned_gen_number, max_generation, &sc); } else if (settings.demotion) { dprintf (2, ("Demoting EE roots for gen %d", condemned_gen_number)); GCScan::GcDemote (condemned_gen_number, max_generation, &sc); } } { reset_pinned_queue_bos(); #ifndef USE_REGIONS unsigned int gen_number = min (max_generation, 1 + condemned_gen_number); generation* gen = generation_of (gen_number); uint8_t* low = generation_allocation_start (generation_of (gen_number-1)); uint8_t* high = heap_segment_allocated (ephemeral_heap_segment); #endif //!USE_REGIONS while (!pinned_plug_que_empty_p()) { mark* m = pinned_plug_of (deque_pinned_plug()); size_t len = pinned_len (m); uint8_t* arr = (pinned_plug (m) - len); dprintf(3,("free [%Ix %Ix[ pin", (size_t)arr, (size_t)arr + len)); if (len != 0) { assert (len >= Align (min_obj_size)); make_unused_array (arr, len); // fix fully contained bricks + first one // if the array goes beyond the first brick size_t start_brick = brick_of (arr); size_t end_brick = brick_of (arr + len); if (end_brick != start_brick) { dprintf (3, ("Fixing bricks [%Ix, %Ix[ to point to unused array %Ix", start_brick, end_brick, (size_t)arr)); set_brick (start_brick, arr - brick_address (start_brick)); size_t brick = start_brick+1; while (brick < end_brick) { set_brick (brick, start_brick - brick); brick++; } } #ifdef USE_REGIONS int gen_number = object_gennum_plan (arr); generation* gen = generation_of (gen_number); #else //when we take an old segment to make the new //ephemeral segment. 
we can have a bunch of //pinned plugs out of order going to the new ephemeral seg //and then the next plugs go back to max_generation if ((heap_segment_mem (ephemeral_heap_segment) <= arr) && (heap_segment_reserved (ephemeral_heap_segment) > arr)) { while ((low <= arr) && (high > arr)) { gen_number--; assert ((gen_number >= 1) || (demotion_low != MAX_PTR) || settings.demotion || !settings.promotion); dprintf (3, ("new free list generation %d", gen_number)); gen = generation_of (gen_number); if (gen_number >= 1) low = generation_allocation_start (generation_of (gen_number-1)); else low = high; } } else { dprintf (3, ("new free list generation %d", max_generation)); gen_number = max_generation; gen = generation_of (gen_number); } #endif //USE_REGIONS dprintf(3,("h%d threading %Ix (%Id) before pin in gen %d", heap_number, arr, len, gen_number)); thread_gap (arr, len, gen); add_gen_free (gen_number, len); } } } clear_gen1_cards(); } else { //force promotion for sweep settings.promotion = TRUE; settings.compaction = FALSE; #ifdef USE_REGIONS // This should be set for segs too actually. We should always reset demotion // if we sweep. settings.demotion = FALSE; #endif //USE_REGIONS ScanContext sc; sc.thread_number = heap_number; sc.promotion = FALSE; sc.concurrent = FALSE; dprintf (2, ("**** Doing Mark and Sweep GC****")); if ((condemned_gen_number < max_generation)) { #ifdef FREE_USAGE_STATS memcpy (older_gen->gen_free_spaces, r_older_gen_free_space, sizeof (r_older_gen_free_space)); #endif //FREE_USAGE_STATS generation_allocator (older_gen)->copy_from_alloc_list (r_free_list); generation_free_list_space (older_gen) = r_free_list_space; generation_free_obj_space (older_gen) = r_free_obj_space; #ifdef DOUBLY_LINKED_FL if (condemned_gen_number == (max_generation - 1)) { dprintf (2, ("[h%d] no undo, FL %Id-%Id -> %Id, FO %Id+%Id=%Id", heap_number, generation_free_list_space (older_gen), gen2_removed_no_undo, (generation_free_list_space (older_gen) - gen2_removed_no_undo), generation_free_obj_space (older_gen), gen2_removed_no_undo, (generation_free_obj_space (older_gen) + gen2_removed_no_undo))); generation_free_list_space (older_gen) -= gen2_removed_no_undo; generation_free_obj_space (older_gen) += gen2_removed_no_undo; } #endif //DOUBLY_LINKED_FL generation_free_list_allocated (older_gen) = r_older_gen_free_list_allocated; generation_end_seg_allocated (older_gen) = r_older_gen_end_seg_allocated; generation_condemned_allocated (older_gen) = r_older_gen_condemned_allocated; generation_sweep_allocated (older_gen) += dd_survived_size (dynamic_data_of (condemned_gen_number)); generation_allocation_limit (older_gen) = r_allocation_limit; generation_allocation_pointer (older_gen) = r_allocation_pointer; generation_allocation_context_start_region (older_gen) = r_allocation_start_region; generation_allocation_segment (older_gen) = r_allocation_segment; #ifdef USE_REGIONS if (older_gen->gen_num == max_generation) { check_seg_gen_num (r_allocation_segment); } #endif //USE_REGIONS } if ((condemned_gen_number < max_generation)) { // Fix the allocation area of the older generation fix_older_allocation_area (older_gen); } GCToEEInterface::DiagWalkSurvivors(__this, false); make_free_lists (condemned_gen_number); size_t total_recovered_sweep_size = recover_saved_pinned_info(); if (total_recovered_sweep_size > 0) { generation_free_obj_space (generation_of (max_generation)) -= total_recovered_sweep_size; dprintf (2, ("h%d: deduct %Id for pin, fo->%Id", heap_number, total_recovered_sweep_size, 
generation_free_obj_space (generation_of (max_generation)))); } #ifdef MULTIPLE_HEAPS dprintf(3, ("Joining after end of sweep")); gc_t_join.join(this, gc_join_adjust_handle_age_sweep); if (gc_t_join.joined()) #endif //MULTIPLE_HEAPS { #ifdef FEATURE_EVENT_TRACE if (informational_event_enabled_p) { uint64_t current_time = GetHighPrecisionTimeStamp(); gc_time_info[time_sweep] = current_time - gc_time_info[time_sweep]; } #endif //FEATURE_EVENT_TRACE if (!special_sweep_p) { GCScan::GcPromotionsGranted(condemned_gen_number, max_generation, &sc); } #ifndef USE_REGIONS if (condemned_gen_number >= (max_generation -1)) { #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { g_heaps[i]->rearrange_heap_segments(FALSE); } #else rearrange_heap_segments(FALSE); #endif //MULTIPLE_HEAPS } #endif //!USE_REGIONS #ifdef MULTIPLE_HEAPS //join all threads to make sure they are synchronized dprintf(3, ("Restarting after Promotion granted")); gc_t_join.restart(); #endif //MULTIPLE_HEAPS } #ifdef FEATURE_PREMORTEM_FINALIZATION if (!special_sweep_p) { finalize_queue->UpdatePromotedGenerations (condemned_gen_number, TRUE); } #endif // FEATURE_PREMORTEM_FINALIZATION if (!special_sweep_p) { clear_gen1_cards(); } } //verify_partial(); } #ifdef _PREFAST_ #pragma warning(pop) #endif //_PREFAST_ /***************************** Called after compact phase to fix all generation gaps ********************************/ void gc_heap::fix_generation_bounds (int condemned_gen_number, generation* consing_gen) { #ifndef _DEBUG UNREFERENCED_PARAMETER(consing_gen); #endif //_DEBUG int gen_number = condemned_gen_number; dprintf (2, ("---- thread regions gen%d GC ----", gen_number)); #ifdef USE_REGIONS // For ephemeral GCs, we handle up till the generation_allocation_segment as that's the last one we // changed in the older gen. 
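    // Illustrative summary (comment only) of the two shapes "fixing the bounds" takes:
    //
    //   regions:  for each older-gen region we allocated into,
    //                 heap_segment_allocated (region) = heap_segment_plan_allocated (region);
    //             then thread_final_regions() rebuilds each generation's region list, and the
    //             ephemeral segment / alloc_allocated are re-derived from gen0's first region.
    //
    //   segments: each condemned generation's allocation start is reset to its planned start
    //             and the old start gap becomes a free object (see the #else branch below).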
if (settings.promotion && (condemned_gen_number < max_generation)) { int older_gen_number = condemned_gen_number + 1; generation* older_gen = generation_of (older_gen_number); heap_segment* last_alloc_region = generation_allocation_segment (older_gen); dprintf (REGIONS_LOG, ("fix till we see alloc region which is %Ix", heap_segment_mem (last_alloc_region))); heap_segment* region = heap_segment_rw (generation_start_segment (older_gen)); while (region) { heap_segment_allocated (region) = heap_segment_plan_allocated (region); if (region == last_alloc_region) break; region = heap_segment_next (region); } } thread_final_regions (true); ephemeral_heap_segment = generation_start_segment (generation_of (0)); alloc_allocated = heap_segment_allocated (ephemeral_heap_segment); #else //USE_REGIONS assert (generation_allocation_segment (consing_gen) == ephemeral_heap_segment); int bottom_gen = 0; while (gen_number >= bottom_gen) { generation* gen = generation_of (gen_number); dprintf(3,("Fixing generation pointers for %Ix", gen_number)); if ((gen_number < max_generation) && ephemeral_promotion) { size_t saved_eph_start_size = saved_ephemeral_plan_start_size[gen_number]; make_unused_array (saved_ephemeral_plan_start[gen_number], saved_eph_start_size); generation_free_obj_space (generation_of (max_generation)) += saved_eph_start_size; dprintf (2, ("[h%d] EP %Ix(%Id)", heap_number, saved_ephemeral_plan_start[gen_number], saved_ephemeral_plan_start_size[gen_number])); } reset_allocation_pointers (gen, generation_plan_allocation_start (gen)); make_unused_array (generation_allocation_start (gen), generation_plan_allocation_start_size (gen)); dprintf(3,(" start %Ix", (size_t)generation_allocation_start (gen))); gen_number--; } #ifdef MULTIPLE_HEAPS if (ephemeral_promotion) { //we are creating a generation fault. set the cards. // and we are only doing this for multiple heaps because in the single heap scenario the // new ephemeral generations will be empty and there'll be no need to set cards for the // old ephemeral generations that got promoted into max_generation. 
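        // Why the cards matter (illustrative note): the old ephemeral objects were promoted
        // into max_generation in place, so references they already hold into generations that
        // stay younger are now cross-generation references that no write barrier ever recorded.
        // Conservatively setting every card that covers the promoted range, roughly
        //
        //   for (size_t c = card_of (start); c != card_of (align_on_card (end)); c++)
        //       set_card (c);
        //
        // guarantees the next ephemeral GC's card scan will still find those references.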
ptrdiff_t delta = 0; heap_segment* old_ephemeral_seg = seg_mapping_table_segment_of (saved_ephemeral_plan_start[max_generation-1]); assert (in_range_for_segment (saved_ephemeral_plan_start[max_generation-1], old_ephemeral_seg)); size_t end_card = card_of (align_on_card (heap_segment_plan_allocated (old_ephemeral_seg))); size_t card = card_of (saved_ephemeral_plan_start[max_generation-1]); while (card != end_card) { set_card (card); card++; } } #endif //MULTIPLE_HEAPS #ifdef BACKGROUND_GC if (should_update_end_mark_size()) { background_soh_size_end_mark = generation_size (max_generation); } #endif //BACKGROUND_GC #endif //!USE_REGIONS { alloc_allocated = heap_segment_plan_allocated(ephemeral_heap_segment); //reset the allocated size #ifdef _DEBUG uint8_t* start = get_soh_start_object (ephemeral_heap_segment, youngest_generation); if (settings.promotion && !settings.demotion) { assert ((start + get_soh_start_obj_len (start)) == heap_segment_plan_allocated(ephemeral_heap_segment)); } #endif //_DEBUG heap_segment_allocated(ephemeral_heap_segment)= heap_segment_plan_allocated(ephemeral_heap_segment); } } #ifndef USE_REGIONS uint8_t* gc_heap::generation_limit (int gen_number) { if (settings.promotion) { if (gen_number <= 1) return heap_segment_reserved (ephemeral_heap_segment); else return generation_allocation_start (generation_of ((gen_number - 2))); } else { if (gen_number <= 0) return heap_segment_reserved (ephemeral_heap_segment); else return generation_allocation_start (generation_of ((gen_number - 1))); } } #endif //!USE_REGIONS BOOL gc_heap::ensure_gap_allocation (int condemned_gen_number) { #ifndef USE_REGIONS uint8_t* start = heap_segment_allocated (ephemeral_heap_segment); size_t size = Align (min_obj_size)*(condemned_gen_number+1); assert ((start + size) <= heap_segment_reserved (ephemeral_heap_segment)); if ((start + size) > heap_segment_committed (ephemeral_heap_segment)) { if (!grow_heap_segment (ephemeral_heap_segment, start + size)) { return FALSE; } } #endif //USE_REGIONS return TRUE; } uint8_t* gc_heap::allocate_at_end (size_t size) { uint8_t* start = heap_segment_allocated (ephemeral_heap_segment); size = Align (size); uint8_t* result = start; // only called to allocate a min obj so can't overflow here. assert ((start + size) <= heap_segment_reserved (ephemeral_heap_segment)); //ensure_gap_allocation took care of it assert ((start + size) <= heap_segment_committed (ephemeral_heap_segment)); heap_segment_allocated (ephemeral_heap_segment) += size; return result; } #ifdef USE_REGIONS // Find the first non empty region and also does the following in the process - // + decommit end of region if it's not a gen0 region; // + set the region gen_num to the new one; // // For empty regions, we always return empty regions to free unless it's a gen // start region. Note that I'm returning gen0 empty regions as well, however, // returning a region to free does not decommit. // // If this is called for a compacting GC, we know we always take the planned generation // on the region (and set the new allocated); else this is called for sweep in which case // it's more complicated - // // + if we are in the special sweep mode, we don't change the old gen number at all // + if we are not in special sweep we need to promote all regions, including the SIP ones // because we make the assumption that this is the case for sweep for handles. 
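// Condensed view of the walk performed by the function below (illustrative, comment only):
//
//   empty region (mem == allocated, or == plan_allocated when compacting)
//       -> return_free_region() and keep walking;
//   first non-empty region, compacting
//       -> allocated = plan_allocated (unless it was swept in plan) and the region takes
//          its planned generation;
//   first non-empty region, sweeping
//       -> planned gen = old gen for a special sweep, otherwise get_plan_gen_num (old gen);
//   old gen >= 2
//       -> the unused tail of the region is decommitted;
//
// It returns that first non-empty region (or 0 if none), threading its SIP free list
// onto the generation's allocator when the region was swept in plan.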
heap_segment* gc_heap::find_first_valid_region (heap_segment* region, bool compact_p) { check_seg_gen_num (generation_allocation_segment (generation_of (max_generation))); dprintf (REGIONS_LOG, (" FFVR region %Ix(%Ix), gen%d", (size_t)region, (region ? heap_segment_mem (region) : 0), (region ? heap_segment_gen_num (region) : 0))); if (!region) return 0; heap_segment* current_region = region; do { int gen_num = heap_segment_gen_num (current_region); int plan_gen_num = -1; if (compact_p) { assert (settings.compaction); plan_gen_num = heap_segment_plan_gen_num (current_region); dprintf (REGIONS_LOG, (" gen%d->%d", gen_num, plan_gen_num)); } else { plan_gen_num = (special_sweep_p ? gen_num : get_plan_gen_num (gen_num)); dprintf (REGIONS_LOG, (" gen%d->%d, special_sweep_p %d, swept_in_plan %d", gen_num, plan_gen_num, (int)special_sweep_p, (int)heap_segment_swept_in_plan (current_region))); } uint8_t* allocated = (compact_p ? heap_segment_plan_allocated (current_region) : heap_segment_allocated (current_region)); if (heap_segment_mem (current_region) == allocated) { heap_segment* region_to_delete = current_region; current_region = heap_segment_next (current_region); return_free_region (region_to_delete); dprintf (REGIONS_LOG, (" h%d gen%d return region %Ix to free, current->%Ix(%Ix)", heap_number, gen_num, heap_segment_mem (region_to_delete), current_region, (current_region ? heap_segment_mem (current_region) : 0))); if (!current_region) return 0; } else { if (compact_p) { dprintf (REGIONS_LOG, (" gen%d setting region %Ix alloc %Ix to plan %Ix", gen_num, heap_segment_mem (current_region), heap_segment_allocated (current_region), heap_segment_plan_allocated (current_region))); if (heap_segment_swept_in_plan (current_region)) { assert (heap_segment_allocated (current_region) == heap_segment_plan_allocated (current_region)); } else { heap_segment_allocated (current_region) = heap_segment_plan_allocated (current_region); } } else { // Set this so we keep plan gen and gen the same. set_region_plan_gen_num (current_region, plan_gen_num); } if (gen_num >= soh_gen2) { dprintf (REGIONS_LOG, (" gen%d decommit end of region %Ix(%Ix)", gen_num, current_region, heap_segment_mem (current_region))); decommit_heap_segment_pages (current_region, 0); } dprintf (REGIONS_LOG, (" set region %Ix(%Ix) gen num to %d", current_region, heap_segment_mem (current_region), plan_gen_num)); set_region_gen_num (current_region, plan_gen_num); break; } } while (current_region); assert (current_region); if (heap_segment_swept_in_plan (current_region)) { int gen_num = heap_segment_gen_num (current_region); dprintf (REGIONS_LOG, ("threading SIP region %Ix surv %Id onto gen%d", heap_segment_mem (current_region), heap_segment_survived (current_region), gen_num)); generation* gen = generation_of (gen_num); generation_allocator (gen)->thread_sip_fl (current_region); generation_free_list_space (gen) += heap_segment_free_list_size (current_region); generation_free_obj_space (gen) += heap_segment_free_obj_size (current_region); } // Take this opportunity to make sure all the regions left with flags only for this GC are reset. 
    heap_segment_swept_in_plan (current_region) = false;
    current_region->flags &= ~heap_segment_flags_demoted;

    return current_region;
}

void gc_heap::thread_final_regions (bool compact_p)
{
    for (int i = 0; i < max_generation; i++)
    {
        if (reserved_free_regions_sip[i])
        {
            return_free_region (reserved_free_regions_sip[i]);
        }
    }

    int condemned_gen_number = settings.condemned_generation;
    generation_region_info generation_final_regions[max_generation + 1];
    memset (generation_final_regions, 0, sizeof (generation_final_regions));

    // Step 1: we initialize all the regions for generations we are not condemning with their
    // current head and tail as we know these regions will for sure exist.
    for (int gen_idx = max_generation; gen_idx > condemned_gen_number; gen_idx--)
    {
        generation* gen = generation_of (gen_idx);
        // Note this needs to be the first rw region as we will not be changing any ro regions and
        // we will work on thread rw regions here.
        generation_final_regions[gen_idx].head = heap_segment_rw (generation_start_segment (gen));
        generation_final_regions[gen_idx].tail = generation_tail_region (gen);
    }

#ifdef BACKGROUND_GC
    heap_segment* max_gen_tail_region = 0;
    if (should_update_end_mark_size())
    {
        max_gen_tail_region = generation_final_regions[max_generation].tail;
    }
#endif //BACKGROUND_GC

    // Step 2: for each region in the condemned generations, we thread it onto its planned generation
    // in our generation_final_regions array.
    for (int gen_idx = condemned_gen_number; gen_idx >= 0; gen_idx--)
    {
        heap_segment* current_region = heap_segment_rw (generation_start_segment (generation_of (gen_idx)));
        dprintf (REGIONS_LOG, ("gen%d start from %Ix", gen_idx, heap_segment_mem (current_region)));

        while ((current_region = find_first_valid_region (current_region, compact_p)))
        {
            assert (!compact_p ||
                    (heap_segment_plan_gen_num (current_region) == heap_segment_gen_num (current_region)));
            int new_gen_num = heap_segment_plan_gen_num (current_region);
            generation* new_gen = generation_of (new_gen_num);
            heap_segment* next_region = heap_segment_next (current_region);
            if (generation_final_regions[new_gen_num].head)
            {
                assert (generation_final_regions[new_gen_num].tail);
                // The new gen already exists, just thread this region onto it.
                dprintf (REGIONS_LOG, ("gen%d exists, tail region %Ix next -> %Ix",
                    new_gen_num, heap_segment_mem (generation_final_regions[new_gen_num].tail),
                    heap_segment_mem (current_region)));
                heap_segment_next (generation_final_regions[new_gen_num].tail) = current_region;
                generation_final_regions[new_gen_num].tail = current_region;
            }
            else
            {
                generation_final_regions[new_gen_num].head = current_region;
                generation_final_regions[new_gen_num].tail = current_region;
            }

            current_region = next_region;
        }
    }

    // Step 3: all the tail regions' next needs to be set to 0.
for (int gen_idx = 0; gen_idx <= max_generation; gen_idx++) { generation* gen = generation_of (gen_idx); if (generation_final_regions[gen_idx].tail) { heap_segment_next (generation_final_regions[gen_idx].tail) = 0; //if (heap_segment_next (generation_final_regions[gen_idx].tail) != 0) //{ // dprintf (REGIONS_LOG, ("tail->next is %Ix", // heap_segment_next (generation_final_regions[gen_idx].tail))); // GCToOSInterface::DebugBreak(); //} } } #ifdef BACKGROUND_GC if (max_gen_tail_region) { max_gen_tail_region = heap_segment_next (max_gen_tail_region); while (max_gen_tail_region) { background_soh_size_end_mark += heap_segment_allocated (max_gen_tail_region) - heap_segment_mem (max_gen_tail_region); max_gen_tail_region = heap_segment_next (max_gen_tail_region); } } #endif //BACKGROUND_GC // Step 4: if a generation doesn't have any regions, we need to get a new one for it; // otherwise we just set the head region as the start region for that generation. for (int gen_idx = 0; gen_idx <= max_generation; gen_idx++) { bool condemned_p = (gen_idx <= condemned_gen_number); assert (condemned_p || generation_final_regions[gen_idx].head); generation* gen = generation_of (gen_idx); heap_segment* start_region = 0; if (generation_final_regions[gen_idx].head) { if (condemned_p) { start_region = generation_final_regions[gen_idx].head; thread_start_region (gen, start_region); } generation_tail_region (gen) = generation_final_regions[gen_idx].tail; dprintf (REGIONS_LOG, ("setting gen%d start %Ix, tail %Ix", gen_idx, heap_segment_mem (heap_segment_rw (generation_start_segment (gen))), heap_segment_mem (generation_tail_region (gen)))); } else { start_region = get_free_region (gen_idx); thread_start_region (gen, start_region); dprintf (REGIONS_LOG, ("creating new gen%d at %Ix", gen_idx, heap_segment_mem (start_region))); } if (condemned_p) { uint8_t* gen_start = heap_segment_mem (start_region); reset_allocation_pointers (gen, gen_start); } } verify_regions (true, false); } void gc_heap::thread_start_region (generation* gen, heap_segment* region) { heap_segment* prev_region = generation_tail_ro_region (gen); if (prev_region) { heap_segment_next (prev_region) = region; dprintf (REGIONS_LOG,("gen%d tail ro %Ix(%Ix) next -> %Ix(%Ix)", gen->gen_num, (size_t)prev_region, heap_segment_mem (prev_region), (size_t)region, heap_segment_mem (region))); } else { generation_start_segment (gen) = region; dprintf (REGIONS_LOG, ("start region of gen%d -> %Ix(%Ix)", gen->gen_num, (size_t)region, heap_segment_mem (region))); } dprintf (REGIONS_LOG, ("tail region of gen%d -> %Ix(%Ix)", gen->gen_num, (size_t)region, heap_segment_mem (region))); generation_tail_region (gen) = region; } heap_segment* gc_heap::get_new_region (int gen_number, size_t size) { heap_segment* new_region = get_free_region (gen_number, size); if (new_region) { switch (gen_number) { default: assert ((new_region->flags & (heap_segment_flags_loh | heap_segment_flags_poh)) == 0); break; case loh_generation: new_region->flags |= heap_segment_flags_loh; break; case poh_generation: new_region->flags |= heap_segment_flags_poh; break; } generation* gen = generation_of (gen_number); heap_segment_next (generation_tail_region (gen)) = new_region; generation_tail_region (gen) = new_region; verify_regions (gen_number, false, settings.concurrent); } return new_region; } heap_segment* gc_heap::allocate_new_region (gc_heap* hp, int gen_num, bool uoh_p, size_t size) { uint8_t* start = 0; uint8_t* end = 0; // size parameter should be non-zero only for large regions assert (uoh_p 
|| size == 0); // REGIONS TODO: allocate POH regions on the right bool allocated_p = (uoh_p ? global_region_allocator.allocate_large_region (&start, &end, allocate_forward, size, on_used_changed) : global_region_allocator.allocate_basic_region (&start, &end, on_used_changed)); if (!allocated_p) { return 0; } heap_segment* res = make_heap_segment (start, (end - start), hp, gen_num); dprintf (REGIONS_LOG, ("got a new region %Ix %Ix->%Ix", (size_t)res, start, end)); if (res == nullptr) { global_region_allocator.delete_region (start); } return res; } void gc_heap::update_start_tail_regions (generation* gen, heap_segment* region_to_delete, heap_segment* prev_region, heap_segment* next_region) { if (region_to_delete == heap_segment_rw (generation_start_segment (gen))) { assert (!prev_region); heap_segment* tail_ro_region = generation_tail_ro_region (gen); if (tail_ro_region) { heap_segment_next (tail_ro_region) = next_region; dprintf (REGIONS_LOG, ("gen%d tail ro %Ix(%Ix) next updated to %Ix(%Ix)", gen->gen_num, (size_t)tail_ro_region, heap_segment_mem (tail_ro_region), (size_t)next_region, heap_segment_mem (next_region))); } else { generation_start_segment (gen) = next_region; dprintf (REGIONS_LOG, ("start region of gen%d updated to %Ix(%Ix)", gen->gen_num, (size_t)next_region, heap_segment_mem (next_region))); } } if (region_to_delete == generation_tail_region (gen)) { assert (!next_region); generation_tail_region (gen) = prev_region; dprintf (REGIONS_LOG, ("tail region of gen%d updated to %Ix(%Ix)", gen->gen_num, (size_t)prev_region, heap_segment_mem (prev_region))); } verify_regions (false, settings.concurrent); } // There's one complication with deciding whether we can make a region SIP or not - if the plan_gen_num of // a generation is not maxgen, and if we want to make every region in that generation maxgen, we need to // make sure we can get a new region for this generation so we can guarantee each generation has at least // one region. If we can't get a new region, we need to make sure we leave at least one region in that gen // to guarantee our invariant. // // This new region we get needs to be temporarily recorded instead of being on the free_regions list because // we can't use it for other purposes. inline bool gc_heap::should_sweep_in_plan (heap_segment* region) { bool sip_p = false; int gen_num = get_region_gen_num (region); int new_gen_num = get_plan_gen_num (gen_num); heap_segment_swept_in_plan (region) = false; dprintf (REGIONS_LOG, ("checking if region %Ix should be SIP", heap_segment_mem (region))); #ifdef STRESS_REGIONS // Only do this for testing or it would keep too much swept. 
if (0) { num_condemned_regions++; if ((num_condemned_regions % sip_seg_interval) == 0) { set_region_plan_gen_num (region, new_gen_num); sip_p = true; } if ((num_condemned_regions % sip_seg_maxgen_interval) == 0) { set_region_plan_gen_num (region, max_generation); sip_maxgen_regions_per_gen[gen_num]++; sip_p = true; } } else #endif //STRESS_REGIONS { size_t basic_region_size = (size_t)1 << min_segment_size_shr; assert (heap_segment_gen_num (region) == heap_segment_plan_gen_num (region)); int surv_ratio = (int)(((double)heap_segment_survived (region) * 100.0) / (double)basic_region_size); dprintf (2222, ("SSIP: region %Ix surv %Id / %Id = %d%%(%d)", heap_segment_mem (region), heap_segment_survived (region), basic_region_size, surv_ratio, sip_surv_ratio_th)); if (surv_ratio >= sip_surv_ratio_th) { set_region_plan_gen_num (region, new_gen_num); sip_p = true; } if (new_gen_num < max_generation) { int old_card_surv_ratio = (int)(((double)heap_segment_old_card_survived (region) * 100.0) / (double)basic_region_size); dprintf (2222, ("SSIP: region %Ix old card surv %Id / %Id = %d%%(%d)", heap_segment_mem (region), heap_segment_old_card_survived (region), basic_region_size, old_card_surv_ratio, sip_surv_ratio_th)); if (old_card_surv_ratio >= sip_old_card_surv_ratio_th) { set_region_plan_gen_num (region, max_generation); sip_maxgen_regions_per_gen[gen_num]++; sip_p = true; } } } if (sip_p) { num_sip_regions++; if ((new_gen_num < max_generation) && (sip_maxgen_regions_per_gen[gen_num] == regions_per_gen[gen_num])) { assert (get_region_gen_num (region) == 0); assert (new_gen_num < max_generation); heap_segment* reserved_free_region = get_free_region (gen_num); if (reserved_free_region) { dprintf (REGIONS_LOG, ("all regions in gen%d -> SIP 2, get a new region for it %Ix", gen_num, heap_segment_mem (reserved_free_region))); reserved_free_regions_sip[gen_num] = reserved_free_region; } else { // If we cannot get another region, simply revert our decision. sip_maxgen_regions_per_gen[gen_num]--; set_region_plan_gen_num (region, new_gen_num); } } } dprintf (REGIONS_LOG, ("region %Ix %s SIP", heap_segment_mem (region), (sip_p ? "is" : "is not"))); return sip_p; } void heap_segment::thread_free_obj (uint8_t* obj, size_t s) { //dprintf (REGIONS_LOG, ("threading SIP free obj %Ix-%Ix(%Id)", obj, (obj + s), s)); if (s >= min_free_list) { free_list_slot (obj) = 0; if (free_list_head) { assert (free_list_tail); free_list_slot (free_list_tail) = obj; } else { free_list_head = obj; } free_list_tail = obj; free_list_size += s; } else { free_obj_size += s; } } // For a region that we sweep in plan, we need to do the following - // // + set the swept_in_plan_p for this region. // + update allocated for this region. // + build bricks. // + build free objects. We keep a list of them which will then be threaded onto the appropriate generation's // free list. This can be optimized, both gen0 and gen2 GCs are easy to handle - need to see how easy it is // to handle gen1 GCs as the commit/repair there is complicated. // // in plan_phase we also need to make sure to not call update_brick_table when handling end of this region, // and the plan gen num is set accordingly. 
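//
// A rough sketch of the object walk below (illustration only, not the exact code):
//
//   for (uint8_t* x = heap_segment_mem (region); x < heap_segment_allocated (region); x = next_obj)
//   {
//       if (marked (x))  { /* clear mark/pin bits; remember x as the last live object */ }
//       else             { /* coalesce the dead run into one free object and thread_free_obj it */ }
//       // whenever the walk crosses a brick boundary, fix_brick_to_highest keeps the brick table consistent
//   }
//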
void gc_heap::sweep_region_in_plan (heap_segment* region, BOOL use_mark_list, uint8_t**& mark_list_next, uint8_t** mark_list_index) { heap_segment_swept_in_plan (region) = true; region->init_free_list(); uint8_t* x = heap_segment_mem (region); uint8_t* last_marked_obj_start = 0; uint8_t* last_marked_obj_end = 0; uint8_t* end = heap_segment_allocated (region); dprintf (2222, ("h%d region %Ix->%Ix SIP, gen %d->%d, %s mark list(%Ix->%Ix, %Ix->%Ix)", heap_number, x, end, heap_segment_gen_num (region), heap_segment_plan_gen_num (region), (use_mark_list ? "using" : "not using"), (uint8_t*)mark_list_next, (mark_list_next ? *mark_list_next : 0), (uint8_t*)mark_list_index, (mark_list_index ? *mark_list_index : 0))); #ifdef _DEBUG size_t survived = 0; uint8_t* saved_last_unmarked_obj_start = 0; uint8_t* saved_last_unmarked_obj_end = 0; size_t saved_obj_brick = 0; size_t saved_next_obj_brick = 0; #endif //_DEBUG while (x < end) { uint8_t* obj = x; size_t obj_brick = (size_t)obj / brick_size; uint8_t* next_obj = 0; if (marked (obj)) { if (pinned(obj)) { clear_pinned (obj); } clear_marked (obj); size_t s = size (obj); next_obj = obj + Align (s); last_marked_obj_start = obj; last_marked_obj_end = next_obj; #ifdef _DEBUG survived += s; #endif //_DEBUG dprintf (4444, ("M: %Ix-%Ix(%Id)", obj, next_obj, s)); } else { next_obj = find_next_marked (x, end, use_mark_list, mark_list_next, mark_list_index); #ifdef _DEBUG saved_last_unmarked_obj_start = obj; saved_last_unmarked_obj_end = next_obj; #endif //_DEBUG if ((next_obj > obj) && (next_obj != end)) { size_t free_obj_size = next_obj - obj; make_unused_array (obj, free_obj_size); region->thread_free_obj (obj, free_obj_size); dprintf (4444, ("UM threading: %Ix-%Ix(%Id)", obj, next_obj, (next_obj - obj))); } } size_t next_obj_brick = (size_t)next_obj / brick_size; #ifdef _DEBUG saved_obj_brick = obj_brick; saved_next_obj_brick = next_obj_brick; #endif //_DEBUG if (next_obj_brick != obj_brick) { fix_brick_to_highest (obj, next_obj); } x = next_obj; } if (last_marked_obj_start) { // We only need to make sure we fix the brick the last marked object's end is in. // Note this brick could have been fixed already. size_t last_marked_obj_start_b = brick_of (last_marked_obj_start); size_t last_marked_obj_end_b = brick_of (last_marked_obj_end - 1); dprintf (REGIONS_LOG, ("last live obj %Ix(%Ix)-%Ix, fixing its brick(s) %Ix-%Ix", last_marked_obj_start, method_table (last_marked_obj_start), last_marked_obj_end, last_marked_obj_start_b, last_marked_obj_end_b)); if (last_marked_obj_start_b == last_marked_obj_end_b) { set_brick (last_marked_obj_start_b, (last_marked_obj_start - brick_address (last_marked_obj_start_b))); } else { set_brick (last_marked_obj_end_b, (last_marked_obj_start_b - last_marked_obj_end_b)); } } else { last_marked_obj_end = heap_segment_mem (region); } #ifdef _DEBUG size_t region_index = get_basic_region_index_for_address (heap_segment_mem (region)); dprintf (REGIONS_LOG, ("region #%d %Ix survived %Id, %s recorded %Id", region_index, heap_segment_mem (region), survived, ((survived == heap_segment_survived (region)) ? 
"same as" : "diff from"), heap_segment_survived (region))); #ifdef MULTIPLE_HEAPS assert (survived <= (size_t)heap_segment_survived (region)); #else assert (survived == (size_t)heap_segment_survived (region)); #endif //MULTIPLE_HEAPS #endif //_DEBUG assert (last_marked_obj_end); heap_segment_saved_allocated (region) = heap_segment_allocated (region); heap_segment_allocated (region) = last_marked_obj_end; heap_segment_plan_allocated (region) = heap_segment_allocated (region); int plan_gen_num = heap_segment_plan_gen_num (region); generation_allocation_size (generation_of (plan_gen_num)) += heap_segment_survived (region); dprintf (REGIONS_LOG, ("sip: g%d alloc size is now %Id", plan_gen_num, generation_allocation_size (generation_of (plan_gen_num)))); } inline void gc_heap::check_demotion_helper_sip (uint8_t** pval, int parent_gen_num, uint8_t* parent_loc) { uint8_t* child_object = *pval; if (!is_in_heap_range (child_object)) return; if (!child_object) return; int child_object_plan_gen = get_region_plan_gen_num (child_object); if (child_object_plan_gen < parent_gen_num) { set_card (card_of (parent_loc)); } dprintf (3, ("SCS %d, %d", child_object_plan_gen, parent_gen_num)); } heap_segment* gc_heap::relocate_advance_to_non_sip (heap_segment* region) { THREAD_FROM_HEAP; heap_segment* current_region = region; dprintf (REGIONS_LOG, ("Relocate searching for next non SIP, starting from %Ix", (region ? heap_segment_mem (region) : 0))); while (current_region) { if (heap_segment_swept_in_plan (current_region)) { int gen_num = heap_segment_gen_num (current_region); int plan_gen_num = heap_segment_plan_gen_num (current_region); bool use_sip_demotion = (plan_gen_num > get_plan_gen_num (gen_num)); dprintf (REGIONS_LOG, ("region %Ix is SIP, relocating, gen %d, plan gen: %d(supposed to be %d) %s", heap_segment_mem (current_region), gen_num, plan_gen_num, get_plan_gen_num (gen_num), (use_sip_demotion ? "Sd" : "d"))); uint8_t* x = heap_segment_mem (current_region); uint8_t* end = heap_segment_allocated (current_region); // For SIP regions, we go linearly in the region and relocate each object's references. 
while (x < end) { size_t s = size (x); assert (s > 0); uint8_t* next_obj = x + Align (s); Prefetch (next_obj); if (!(((CObjectHeader*)x)->IsFree())) { //relocate_obj_helper (x, s); if (contain_pointers (x)) { dprintf (3, ("$%Ix$", (size_t)x)); go_through_object_nostart (method_table(x), x, s, pval, { uint8_t* child = *pval; //reloc_survivor_helper (pval); relocate_address (pval THREAD_NUMBER_ARG); if (use_sip_demotion) check_demotion_helper_sip (pval, plan_gen_num, (uint8_t*)pval); else check_demotion_helper (pval, (uint8_t*)pval); if (child) { dprintf (4444, ("SIP %Ix(%Ix)->%Ix->%Ix(%Ix)", x, (uint8_t*)pval, child, *pval, method_table (child))); } }); } check_class_object_demotion (x); } x = next_obj; } } else { int gen_num = heap_segment_gen_num (current_region); int plan_gen_num = heap_segment_plan_gen_num (current_region); dprintf (REGIONS_LOG, ("region %Ix is not SIP, relocating, gen %d, plan gen: %d", heap_segment_mem (current_region), gen_num, plan_gen_num)); return current_region; } current_region = heap_segment_next (current_region); } return 0; } #ifdef STRESS_REGIONS void gc_heap::pin_by_gc (uint8_t* object) { heap_segment* region = region_of (object); HndAssignHandleGC(pinning_handles_for_alloc[ph_index_per_heap], object); dprintf (REGIONS_LOG, ("h%d pinning object at %Ix on eph seg %Ix (ph#%d)", heap_number, object, heap_segment_mem (region), ph_index_per_heap)); ph_index_per_heap++; if (ph_index_per_heap == PINNING_HANDLE_INITIAL_LENGTH) { ph_index_per_heap = 0; } } #endif //STRESS_REGIONS #endif //USE_REGIONS void gc_heap::make_free_lists (int condemned_gen_number) { //Promotion has to happen in sweep case. assert (settings.promotion); make_free_args args; int stop_gen_idx = get_stop_generation_index (condemned_gen_number); for (int i = condemned_gen_number; i >= stop_gen_idx; i--) { generation* condemned_gen = generation_of (i); heap_segment* current_heap_segment = get_start_segment (condemned_gen); #ifdef USE_REGIONS if (!current_heap_segment) continue; #endif //USE_REGIONS uint8_t* start_address = get_soh_start_object (current_heap_segment, condemned_gen); size_t current_brick = brick_of (start_address); PREFIX_ASSUME(current_heap_segment != NULL); uint8_t* end_address = heap_segment_allocated (current_heap_segment); size_t end_brick = brick_of (end_address-1); int current_gen_num = i; args.free_list_gen_number = (special_sweep_p ? current_gen_num : get_plan_gen_num (current_gen_num)); args.free_list_gen = generation_of (args.free_list_gen_number); args.highest_plug = 0; #ifdef USE_REGIONS dprintf (REGIONS_LOG, ("starting at gen%d %Ix -> %Ix", i, start_address, end_address)); #else assert (!special_sweep_p); args.current_gen_limit = (((current_gen_num == max_generation)) ? 
MAX_PTR : (generation_limit (args.free_list_gen_number))); #endif //USE_REGIONS #ifndef USE_REGIONS if ((start_address >= end_address) && (condemned_gen_number < max_generation)) { break; } #endif //!USE_REGIONS while (1) { if ((current_brick > end_brick)) { #ifndef USE_REGIONS if (args.current_gen_limit == MAX_PTR) { //We had an empty segment //need to allocate the generation start generation* gen = generation_of (max_generation); heap_segment* start_seg = heap_segment_rw (generation_start_segment (gen)); PREFIX_ASSUME(start_seg != NULL); uint8_t* gap = heap_segment_mem (start_seg); generation_allocation_start (gen) = gap; heap_segment_allocated (start_seg) = gap + Align (min_obj_size); make_unused_array (gap, Align (min_obj_size)); reset_allocation_pointers (gen, gap); dprintf (3, ("Start segment empty, fixing generation start of %d to: %Ix", max_generation, (size_t)gap)); args.current_gen_limit = generation_limit (args.free_list_gen_number); } #endif //!USE_REGIONS if (heap_segment_next_non_sip (current_heap_segment)) { current_heap_segment = heap_segment_next_non_sip (current_heap_segment); } else { break; } current_brick = brick_of (heap_segment_mem (current_heap_segment)); end_brick = brick_of (heap_segment_allocated (current_heap_segment)-1); continue; } { int brick_entry = brick_table [ current_brick ]; if ((brick_entry >= 0)) { make_free_list_in_brick (brick_address (current_brick) + brick_entry-1, &args); dprintf(3,("Fixing brick entry %Ix to %Ix", current_brick, (size_t)args.highest_plug)); set_brick (current_brick, (args.highest_plug - brick_address (current_brick))); } else { if ((brick_entry > -32768)) { #ifdef _DEBUG ptrdiff_t offset = brick_of (args.highest_plug) - current_brick; if ((brick_entry != -32767) && (! ((offset == brick_entry)))) { assert ((brick_entry == -1)); } #endif //_DEBUG //init to -1 for faster find_first_object set_brick (current_brick, -1); } } } current_brick++; } } { #ifdef USE_REGIONS check_seg_gen_num (generation_allocation_segment (generation_of (max_generation))); thread_final_regions (false); generation* gen_gen0 = generation_of (0); ephemeral_heap_segment = generation_start_segment (gen_gen0); alloc_allocated = heap_segment_allocated (ephemeral_heap_segment); // Since we didn't compact, we should recalculate the end_gen0_region_space. end_gen0_region_space = get_gen0_end_space(); #else //USE_REGIONS int bottom_gen = 0; args.free_list_gen_number--; while (args.free_list_gen_number >= bottom_gen) { uint8_t* gap = 0; generation* gen2 = generation_of (args.free_list_gen_number); gap = allocate_at_end (Align(min_obj_size)); generation_allocation_start (gen2) = gap; reset_allocation_pointers (gen2, gap); dprintf(3,("Fixing generation start of %d to: %Ix", args.free_list_gen_number, (size_t)gap)); PREFIX_ASSUME(gap != NULL); make_unused_array (gap, Align (min_obj_size)); args.free_list_gen_number--; } //reset the allocated size uint8_t* start2 = generation_allocation_start (youngest_generation); alloc_allocated = start2 + Align (size (start2)); #endif //USE_REGIONS } } void gc_heap::make_free_list_in_brick (uint8_t* tree, make_free_args* args) { assert ((tree != NULL)); { int right_node = node_right_child (tree); int left_node = node_left_child (tree); args->highest_plug = 0; if (! (0 == tree)) { if (! 
(0 == left_node)) { make_free_list_in_brick (tree + left_node, args); } { uint8_t* plug = tree; size_t gap_size = node_gap_size (tree); uint8_t* gap = (plug - gap_size); args->highest_plug = tree; dprintf (3,("plug: %Ix (highest p: %Ix), free %Ix len %Id in %d", plug, args->highest_plug, (size_t)gap, gap_size, args->free_list_gen_number)); #ifdef SHORT_PLUGS if (is_plug_padded (plug)) { dprintf (3, ("%Ix padded", plug)); clear_plug_padded (plug); } #endif //SHORT_PLUGS #ifdef DOUBLY_LINKED_FL // These 2 checks should really just be merged into one. if (is_plug_bgc_mark_bit_set (plug)) { dprintf (3333, ("cbgcm: %Ix", plug)); clear_plug_bgc_mark_bit (plug); } if (is_free_obj_in_compact_bit_set (plug)) { dprintf (3333, ("cfoc: %Ix", plug)); clear_free_obj_in_compact_bit (plug); } #endif //DOUBLY_LINKED_FL #ifndef USE_REGIONS gen_crossing: { if ((args->current_gen_limit == MAX_PTR) || ((plug >= args->current_gen_limit) && ephemeral_pointer_p (plug))) { dprintf(3,(" Crossing Generation boundary at %Ix", (size_t)args->current_gen_limit)); if (!(args->current_gen_limit == MAX_PTR)) { args->free_list_gen_number--; args->free_list_gen = generation_of (args->free_list_gen_number); } dprintf(3,( " Fixing generation start of %d to: %Ix", args->free_list_gen_number, (size_t)gap)); reset_allocation_pointers (args->free_list_gen, gap); args->current_gen_limit = generation_limit (args->free_list_gen_number); if ((gap_size >= (2*Align (min_obj_size)))) { dprintf(3,(" Splitting the gap in two %Id left", gap_size)); make_unused_array (gap, Align(min_obj_size)); gap_size = (gap_size - Align(min_obj_size)); gap = (gap + Align(min_obj_size)); } else { make_unused_array (gap, gap_size); gap_size = 0; } goto gen_crossing; } } #endif //!USE_REGIONS thread_gap (gap, gap_size, args->free_list_gen); add_gen_free (args->free_list_gen->gen_num, gap_size); } if (! (0 == right_node)) { make_free_list_in_brick (tree + right_node, args); } } } } void gc_heap::thread_gap (uint8_t* gap_start, size_t size, generation* gen) { #ifndef USE_REGIONS assert (generation_allocation_start (gen)); #endif if ((size > 0)) { #ifndef USE_REGIONS assert ((heap_segment_rw (generation_start_segment (gen)) != ephemeral_heap_segment) || (gap_start > generation_allocation_start (gen))); #endif //USE_REGIONS // The beginning of a segment gap is not aligned assert (size >= Align (min_obj_size)); make_unused_array (gap_start, size, (!settings.concurrent && (gen != youngest_generation)), (gen->gen_num == max_generation)); dprintf (3, ("fr: [%Ix, %Ix[", (size_t)gap_start, (size_t)gap_start+size)); if ((size >= min_free_list)) { generation_free_list_space (gen) += size; generation_allocator (gen)->thread_item (gap_start, size); } else { generation_free_obj_space (gen) += size; } } } void gc_heap::uoh_thread_gap_front (uint8_t* gap_start, size_t size, generation* gen) { #ifndef USE_REGIONS assert (generation_allocation_start (gen)); #endif if (size >= min_free_list) { generation_free_list_space (gen) += size; generation_allocator (gen)->thread_item_front (gap_start, size); } } void gc_heap::make_unused_array (uint8_t* x, size_t size, BOOL clearp, BOOL resetp) { dprintf (3, (ThreadStressLog::gcMakeUnusedArrayMsg(), (size_t)x, (size_t)(x+size))); assert (size >= Align (min_obj_size)); //#if defined (VERIFY_HEAP) && defined (BACKGROUND_GC) // check_batch_mark_array_bits (x, x+size); //#endif //VERIFY_HEAP && BACKGROUND_GC if (resetp) { #ifdef BGC_SERVO_TUNING // Don't do this for servo tuning because it makes it even harder to regulate WS. 
if (!(bgc_tuning::enable_fl_tuning && bgc_tuning::fl_tuning_triggered)) #endif //BGC_SERVO_TUNING { reset_memory (x, size); } } ((CObjectHeader*)x)->SetFree(size); #ifdef HOST_64BIT #if BIGENDIAN #error "This won't work on big endian platforms" #endif size_t size_as_object = (uint32_t)(size - free_object_base_size) + free_object_base_size; if (size_as_object < size) { // // If the size is more than 4GB, we need to create multiple objects because of // the Array::m_NumComponents is uint32_t and the high 32 bits of unused array // size is ignored in regular object size computation. // uint8_t * tmp = x + size_as_object; size_t remaining_size = size - size_as_object; while (remaining_size > UINT32_MAX) { // Make sure that there will be at least Align(min_obj_size) left size_t current_size = UINT32_MAX - get_alignment_constant (FALSE) - Align (min_obj_size, get_alignment_constant (FALSE)); ((CObjectHeader*)tmp)->SetFree(current_size); remaining_size -= current_size; tmp += current_size; } ((CObjectHeader*)tmp)->SetFree(remaining_size); } #endif if (clearp) clear_card_for_addresses (x, x + Align(size)); } // Clear memory set by make_unused_array. void gc_heap::clear_unused_array (uint8_t* x, size_t size) { // Also clear the sync block *(((PTR_PTR)x)-1) = 0; ((CObjectHeader*)x)->UnsetFree(); #ifdef HOST_64BIT #if BIGENDIAN #error "This won't work on big endian platforms" #endif // The memory could have been cleared in the meantime. We have to mirror the algorithm // from make_unused_array since we cannot depend on the object sizes in memory. size_t size_as_object = (uint32_t)(size - free_object_base_size) + free_object_base_size; if (size_as_object < size) { uint8_t * tmp = x + size_as_object; size_t remaining_size = size - size_as_object; while (remaining_size > UINT32_MAX) { size_t current_size = UINT32_MAX - get_alignment_constant (FALSE) - Align (min_obj_size, get_alignment_constant (FALSE)); ((CObjectHeader*)tmp)->UnsetFree(); remaining_size -= current_size; tmp += current_size; } ((CObjectHeader*)tmp)->UnsetFree(); } #else UNREFERENCED_PARAMETER(size); #endif } inline uint8_t* tree_search (uint8_t* tree, uint8_t* old_address) { uint8_t* candidate = 0; int cn; while (1) { if (tree < old_address) { if ((cn = node_right_child (tree)) != 0) { assert (candidate < tree); candidate = tree; tree = tree + cn; Prefetch (tree - 8); continue; } else break; } else if (tree > old_address) { if ((cn = node_left_child (tree)) != 0) { tree = tree + cn; Prefetch (tree - 8); continue; } else break; } else break; } if (tree <= old_address) return tree; else if (candidate) return candidate; else return tree; } #ifdef FEATURE_BASICFREEZE bool gc_heap::frozen_object_p (Object* obj) { heap_segment* seg = seg_mapping_table_segment_of ((uint8_t*)obj); return heap_segment_read_only_p (seg); } #endif // FEATURE_BASICFREEZE void gc_heap::relocate_address (uint8_t** pold_address THREAD_NUMBER_DCL) { uint8_t* old_address = *pold_address; #ifdef USE_REGIONS if (!is_in_heap_range (old_address) || !should_check_brick_for_reloc (old_address)) { return; } #else //USE_REGIONS if (!((old_address >= gc_low) && (old_address < gc_high))) #ifdef MULTIPLE_HEAPS { UNREFERENCED_PARAMETER(thread); if (old_address == 0) return; gc_heap* hp = heap_of (old_address); if ((hp == this) || !((old_address >= hp->gc_low) && (old_address < hp->gc_high))) return; } #else //MULTIPLE_HEAPS return ; #endif //MULTIPLE_HEAPS #endif //USE_REGIONS // delta translates old_address into address_gc (old_address); size_t brick = brick_of (old_address); int 
brick_entry = brick_table [ brick ]; uint8_t* new_address = old_address; if (! ((brick_entry == 0))) { retry: { while (brick_entry < 0) { brick = (brick + brick_entry); brick_entry = brick_table [ brick ]; } uint8_t* old_loc = old_address; uint8_t* node = tree_search ((brick_address (brick) + brick_entry-1), old_loc); if ((node <= old_loc)) new_address = (old_address + node_relocation_distance (node)); else { if (node_left_p (node)) { dprintf(3,(" L: %Ix", (size_t)node)); new_address = (old_address + (node_relocation_distance (node) + node_gap_size (node))); } else { brick = brick - 1; brick_entry = brick_table [ brick ]; goto retry; } } } dprintf (4, (ThreadStressLog::gcRelocateReferenceMsg(), pold_address, old_address, new_address)); *pold_address = new_address; return; } #ifdef FEATURE_LOH_COMPACTION if (settings.loh_compaction) { heap_segment* pSegment = seg_mapping_table_segment_of ((uint8_t*)old_address); #ifdef USE_REGIONS // pSegment could be 0 for regions, see comment for is_in_condemned. if (!pSegment) { return; } #endif //USE_REGIONS #ifdef MULTIPLE_HEAPS if (heap_segment_heap (pSegment)->loh_compacted_p) #else if (loh_compacted_p) #endif { size_t flags = pSegment->flags; if ((flags & heap_segment_flags_loh) #ifdef FEATURE_BASICFREEZE && !(flags & heap_segment_flags_readonly) #endif ) { new_address = old_address + loh_node_relocation_distance (old_address); dprintf (4, (ThreadStressLog::gcRelocateReferenceMsg(), pold_address, old_address, new_address)); *pold_address = new_address; } } } #endif //FEATURE_LOH_COMPACTION } inline void gc_heap::check_class_object_demotion (uint8_t* obj) { #ifdef COLLECTIBLE_CLASS if (is_collectible(obj)) { check_class_object_demotion_internal (obj); } #else UNREFERENCED_PARAMETER(obj); #endif //COLLECTIBLE_CLASS } #ifdef COLLECTIBLE_CLASS NOINLINE void gc_heap::check_class_object_demotion_internal (uint8_t* obj) { if (settings.demotion) { #ifdef MULTIPLE_HEAPS // We set the card without checking the demotion range 'cause at this point // the handle that points to the loader allocator object may or may not have // been relocated by other GC threads. set_card (card_of (obj)); #else THREAD_FROM_HEAP; uint8_t* class_obj = get_class_object (obj); dprintf (3, ("%Ix: got classobj %Ix", obj, class_obj)); uint8_t* temp_class_obj = class_obj; uint8_t** temp = &temp_class_obj; relocate_address (temp THREAD_NUMBER_ARG); check_demotion_helper (temp, obj); #endif //MULTIPLE_HEAPS } } #endif //COLLECTIBLE_CLASS inline void gc_heap::check_demotion_helper (uint8_t** pval, uint8_t* parent_obj) { #ifdef USE_REGIONS uint8_t* child_object = *pval; if (!is_in_heap_range (child_object)) return; int child_object_plan_gen = get_region_plan_gen_num (child_object); bool child_obj_demoted_p = is_region_demoted (child_object); if (child_obj_demoted_p) { set_card (card_of (parent_obj)); } dprintf (3, ("SC %d (%s)", child_object_plan_gen, (child_obj_demoted_p ? 
"D" : "ND"))); #else //USE_REGIONS // detect if we are demoting an object if ((*pval < demotion_high) && (*pval >= demotion_low)) { dprintf(3, ("setting card %Ix:%Ix", card_of((uint8_t*)pval), (size_t)pval)); set_card (card_of (parent_obj)); } #ifdef MULTIPLE_HEAPS else if (settings.demotion) { dprintf (4, ("Demotion active, computing heap_of object")); gc_heap* hp = heap_of (*pval); if ((*pval < hp->demotion_high) && (*pval >= hp->demotion_low)) { dprintf(3, ("setting card %Ix:%Ix", card_of((uint8_t*)pval), (size_t)pval)); set_card (card_of (parent_obj)); } } #endif //MULTIPLE_HEAPS #endif //USE_REGIONS } inline void gc_heap::reloc_survivor_helper (uint8_t** pval) { THREAD_FROM_HEAP; relocate_address (pval THREAD_NUMBER_ARG); check_demotion_helper (pval, (uint8_t*)pval); } inline void gc_heap::relocate_obj_helper (uint8_t* x, size_t s) { THREAD_FROM_HEAP; if (contain_pointers (x)) { dprintf (3, ("o$%Ix$", (size_t)x)); go_through_object_nostart (method_table(x), x, s, pval, { uint8_t* child = *pval; reloc_survivor_helper (pval); if (child) { dprintf (3, ("%Ix->%Ix->%Ix", (uint8_t*)pval, child, *pval)); } }); } check_class_object_demotion (x); } inline void gc_heap::reloc_ref_in_shortened_obj (uint8_t** address_to_set_card, uint8_t** address_to_reloc) { THREAD_FROM_HEAP; uint8_t* old_val = (address_to_reloc ? *address_to_reloc : 0); relocate_address (address_to_reloc THREAD_NUMBER_ARG); if (address_to_reloc) { dprintf (3, ("SR %Ix: %Ix->%Ix", (uint8_t*)address_to_reloc, old_val, *address_to_reloc)); } check_demotion_helper (address_to_reloc, (uint8_t*)address_to_set_card); } void gc_heap::relocate_pre_plug_info (mark* pinned_plug_entry) { THREAD_FROM_HEAP; uint8_t* plug = pinned_plug (pinned_plug_entry); uint8_t* pre_plug_start = plug - sizeof (plug_and_gap); // Note that we need to add one ptr size here otherwise we may not be able to find the relocated // address. Consider this scenario: // gen1 start | 3-ptr sized NP | PP // 0 | 0x18 | 0x30 // If we are asking for the reloc address of 0x10 we will AV in relocate_address because // the first plug we saw in the brick is 0x18 which means 0x10 will cause us to go back a brick // which is 0, and then we'll AV in tree_search when we try to do node_right_child (tree). pre_plug_start += sizeof (uint8_t*); uint8_t** old_address = &pre_plug_start; uint8_t* old_val = (old_address ? *old_address : 0); relocate_address (old_address THREAD_NUMBER_ARG); if (old_address) { dprintf (3, ("PreR %Ix: %Ix->%Ix, set reloc: %Ix", (uint8_t*)old_address, old_val, *old_address, (pre_plug_start - sizeof (uint8_t*)))); } pinned_plug_entry->set_pre_plug_info_reloc_start (pre_plug_start - sizeof (uint8_t*)); } inline void gc_heap::relocate_shortened_obj_helper (uint8_t* x, size_t s, uint8_t* end, mark* pinned_plug_entry, BOOL is_pinned) { THREAD_FROM_HEAP; uint8_t* plug = pinned_plug (pinned_plug_entry); if (!is_pinned) { //// Temporary - we just wanna make sure we are doing things right when padding is needed. 
//if ((x + s) < plug) //{ // dprintf (3, ("obj %Ix needed padding: end %Ix is %d bytes from pinned obj %Ix", // x, (x + s), (plug- (x + s)), plug)); // GCToOSInterface::DebugBreak(); //} relocate_pre_plug_info (pinned_plug_entry); } verify_pins_with_post_plug_info("after relocate_pre_plug_info"); uint8_t* saved_plug_info_start = 0; uint8_t** saved_info_to_relocate = 0; if (is_pinned) { saved_plug_info_start = (uint8_t*)(pinned_plug_entry->get_post_plug_info_start()); saved_info_to_relocate = (uint8_t**)(pinned_plug_entry->get_post_plug_reloc_info()); } else { saved_plug_info_start = (plug - sizeof (plug_and_gap)); saved_info_to_relocate = (uint8_t**)(pinned_plug_entry->get_pre_plug_reloc_info()); } uint8_t** current_saved_info_to_relocate = 0; uint8_t* child = 0; dprintf (3, ("x: %Ix, pp: %Ix, end: %Ix", x, plug, end)); if (contain_pointers (x)) { dprintf (3,("s$%Ix$", (size_t)x)); go_through_object_nostart (method_table(x), x, s, pval, { dprintf (3, ("obj %Ix, member: %Ix->%Ix", x, (uint8_t*)pval, *pval)); if ((uint8_t*)pval >= end) { current_saved_info_to_relocate = saved_info_to_relocate + ((uint8_t*)pval - saved_plug_info_start) / sizeof (uint8_t**); child = *current_saved_info_to_relocate; reloc_ref_in_shortened_obj (pval, current_saved_info_to_relocate); dprintf (3, ("last part: R-%Ix(saved: %Ix)->%Ix ->%Ix", (uint8_t*)pval, current_saved_info_to_relocate, child, *current_saved_info_to_relocate)); } else { reloc_survivor_helper (pval); } }); } check_class_object_demotion (x); } void gc_heap::relocate_survivor_helper (uint8_t* plug, uint8_t* plug_end) { uint8_t* x = plug; while (x < plug_end) { size_t s = size (x); uint8_t* next_obj = x + Align (s); Prefetch (next_obj); relocate_obj_helper (x, s); assert (s > 0); x = next_obj; } } // if we expanded, right now we are not handling it as We are not saving the new reloc info. void gc_heap::verify_pins_with_post_plug_info (const char* msg) { #if defined (_DEBUG) && defined (VERIFY_HEAP) if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) { if (!verify_pinned_queue_p) return; if (settings.heap_expansion) return; for (size_t i = 0; i < mark_stack_tos; i++) { mark& m = mark_stack_array[i]; mark* pinned_plug_entry = pinned_plug_of(i); if (pinned_plug_entry->has_post_plug_info() && pinned_plug_entry->post_short_p() && (pinned_plug_entry->saved_post_plug_debug.gap != 1)) { uint8_t* next_obj = pinned_plug_entry->get_post_plug_info_start() + sizeof (plug_and_gap); // object after pin dprintf (3, ("OFP: %Ix, G: %Ix, R: %Ix, LC: %d, RC: %d", next_obj, node_gap_size (next_obj), node_relocation_distance (next_obj), (int)node_left_child (next_obj), (int)node_right_child (next_obj))); size_t* post_plug_debug = (size_t*)(&m.saved_post_plug_debug); if (node_gap_size (next_obj) != *post_plug_debug) { dprintf (1, ("obj: %Ix gap should be %Ix but it is %Ix", next_obj, *post_plug_debug, (size_t)(node_gap_size (next_obj)))); FATAL_GC_ERROR(); } post_plug_debug++; // can't do node_relocation_distance here as it clears the left bit. 
//if (node_relocation_distance (next_obj) != *post_plug_debug) if (*((size_t*)(next_obj - 3 * sizeof (size_t))) != *post_plug_debug) { dprintf (1, ("obj: %Ix reloc should be %Ix but it is %Ix", next_obj, *post_plug_debug, (size_t)(node_relocation_distance (next_obj)))); FATAL_GC_ERROR(); } if (node_left_child (next_obj) > 0) { dprintf (1, ("obj: %Ix, vLC: %d\n", next_obj, (int)(node_left_child (next_obj)))); FATAL_GC_ERROR(); } } } dprintf (3, ("%s verified", msg)); } #else UNREFERENCED_PARAMETER(msg); #endif // _DEBUG && VERIFY_HEAP } #ifdef COLLECTIBLE_CLASS // We don't want to burn another ptr size space for pinned plugs to record this so just // set the card unconditionally for collectible objects if we are demoting. inline void gc_heap::unconditional_set_card_collectible (uint8_t* obj) { if (settings.demotion) { set_card (card_of (obj)); } } #endif //COLLECTIBLE_CLASS void gc_heap::relocate_shortened_survivor_helper (uint8_t* plug, uint8_t* plug_end, mark* pinned_plug_entry) { uint8_t* x = plug; uint8_t* p_plug = pinned_plug (pinned_plug_entry); BOOL is_pinned = (plug == p_plug); BOOL check_short_obj_p = (is_pinned ? pinned_plug_entry->post_short_p() : pinned_plug_entry->pre_short_p()); plug_end += sizeof (gap_reloc_pair); //dprintf (3, ("%s %Ix is shortened, and last object %s overwritten", (is_pinned ? "PP" : "NP"), plug, (check_short_obj_p ? "is" : "is not"))); dprintf (3, ("%s %Ix-%Ix short, LO: %s OW", (is_pinned ? "PP" : "NP"), plug, plug_end, (check_short_obj_p ? "is" : "is not"))); verify_pins_with_post_plug_info("begin reloc short surv"); while (x < plug_end) { if (check_short_obj_p && ((DWORD)(plug_end - x) < (DWORD)min_pre_pin_obj_size)) { dprintf (3, ("last obj %Ix is short", x)); if (is_pinned) { #ifdef COLLECTIBLE_CLASS if (pinned_plug_entry->post_short_collectible_p()) unconditional_set_card_collectible (x); #endif //COLLECTIBLE_CLASS // Relocate the saved references based on bits set. uint8_t** saved_plug_info_start = (uint8_t**)(pinned_plug_entry->get_post_plug_info_start()); uint8_t** saved_info_to_relocate = (uint8_t**)(pinned_plug_entry->get_post_plug_reloc_info()); for (size_t i = 0; i < pinned_plug_entry->get_max_short_bits(); i++) { if (pinned_plug_entry->post_short_bit_p (i)) { reloc_ref_in_shortened_obj ((saved_plug_info_start + i), (saved_info_to_relocate + i)); } } } else { #ifdef COLLECTIBLE_CLASS if (pinned_plug_entry->pre_short_collectible_p()) unconditional_set_card_collectible (x); #endif //COLLECTIBLE_CLASS relocate_pre_plug_info (pinned_plug_entry); // Relocate the saved references based on bits set. 
uint8_t** saved_plug_info_start = (uint8_t**)(p_plug - sizeof (plug_and_gap)); uint8_t** saved_info_to_relocate = (uint8_t**)(pinned_plug_entry->get_pre_plug_reloc_info()); for (size_t i = 0; i < pinned_plug_entry->get_max_short_bits(); i++) { if (pinned_plug_entry->pre_short_bit_p (i)) { reloc_ref_in_shortened_obj ((saved_plug_info_start + i), (saved_info_to_relocate + i)); } } } break; } size_t s = size (x); uint8_t* next_obj = x + Align (s); Prefetch (next_obj); if (next_obj >= plug_end) { dprintf (3, ("object %Ix is at the end of the plug %Ix->%Ix", next_obj, plug, plug_end)); verify_pins_with_post_plug_info("before reloc short obj"); relocate_shortened_obj_helper (x, s, (x + Align (s) - sizeof (plug_and_gap)), pinned_plug_entry, is_pinned); } else { relocate_obj_helper (x, s); } assert (s > 0); x = next_obj; } verify_pins_with_post_plug_info("end reloc short surv"); } void gc_heap::relocate_survivors_in_plug (uint8_t* plug, uint8_t* plug_end, BOOL check_last_object_p, mark* pinned_plug_entry) { dprintf (3,("RP: [%Ix(%Ix->%Ix),%Ix(%Ix->%Ix)[", (size_t)plug, brick_of (plug), (size_t)brick_table[brick_of (plug)], (size_t)plug_end, brick_of (plug_end), (size_t)brick_table[brick_of (plug_end)])); if (check_last_object_p) { relocate_shortened_survivor_helper (plug, plug_end, pinned_plug_entry); } else { relocate_survivor_helper (plug, plug_end); } } void gc_heap::relocate_survivors_in_brick (uint8_t* tree, relocate_args* args) { assert ((tree != NULL)); dprintf (3, ("tree: %Ix, args->last_plug: %Ix, left: %Ix, right: %Ix, gap(t): %Ix", tree, args->last_plug, (tree + node_left_child (tree)), (tree + node_right_child (tree)), node_gap_size (tree))); if (node_left_child (tree)) { relocate_survivors_in_brick (tree + node_left_child (tree), args); } { uint8_t* plug = tree; BOOL has_post_plug_info_p = FALSE; BOOL has_pre_plug_info_p = FALSE; if (tree == oldest_pinned_plug) { args->pinned_plug_entry = get_oldest_pinned_entry (&has_pre_plug_info_p, &has_post_plug_info_p); assert (tree == pinned_plug (args->pinned_plug_entry)); dprintf (3, ("tree is the oldest pin: %Ix", tree)); } if (args->last_plug) { size_t gap_size = node_gap_size (tree); uint8_t* gap = (plug - gap_size); dprintf (3, ("tree: %Ix, gap: %Ix (%Ix)", tree, gap, gap_size)); assert (gap_size >= Align (min_obj_size)); uint8_t* last_plug_end = gap; BOOL check_last_object_p = (args->is_shortened || has_pre_plug_info_p); { relocate_survivors_in_plug (args->last_plug, last_plug_end, check_last_object_p, args->pinned_plug_entry); } } else { assert (!has_pre_plug_info_p); } args->last_plug = plug; args->is_shortened = has_post_plug_info_p; if (has_post_plug_info_p) { dprintf (3, ("setting %Ix as shortened", plug)); } dprintf (3, ("last_plug: %Ix(shortened: %d)", plug, (args->is_shortened ? 1 : 0))); } if (node_right_child (tree)) { relocate_survivors_in_brick (tree + node_right_child (tree), args); } } inline void gc_heap::update_oldest_pinned_plug() { oldest_pinned_plug = (pinned_plug_que_empty_p() ? 0 : pinned_plug (oldest_pin())); } heap_segment* gc_heap::get_start_segment (generation* gen) { heap_segment* start_heap_segment = heap_segment_rw (generation_start_segment (gen)); #ifdef USE_REGIONS heap_segment* current_heap_segment = heap_segment_non_sip (start_heap_segment); if (current_heap_segment != start_heap_segment) { dprintf (REGIONS_LOG, ("h%d skipped gen%d SIP regions, start %Ix->%Ix", heap_number, (current_heap_segment ? 
heap_segment_gen_num (current_heap_segment) : -1), heap_segment_mem (start_heap_segment), (current_heap_segment ? heap_segment_mem (current_heap_segment) : 0))); } start_heap_segment = current_heap_segment; #endif //USE_REGIONS return start_heap_segment; } void gc_heap::relocate_survivors (int condemned_gen_number, uint8_t* first_condemned_address) { reset_pinned_queue_bos(); update_oldest_pinned_plug(); int stop_gen_idx = get_stop_generation_index (condemned_gen_number); #ifndef USE_REGIONS assert (first_condemned_address == generation_allocation_start (generation_of (condemned_gen_number))); #endif //!USE_REGIONS for (int i = condemned_gen_number; i >= stop_gen_idx; i--) { generation* condemned_gen = generation_of (i); heap_segment* current_heap_segment = heap_segment_rw (generation_start_segment (condemned_gen)); #ifdef USE_REGIONS current_heap_segment = relocate_advance_to_non_sip (current_heap_segment); if (!current_heap_segment) continue; #endif //USE_REGIONS uint8_t* start_address = get_soh_start_object (current_heap_segment, condemned_gen); size_t current_brick = brick_of (start_address); PREFIX_ASSUME(current_heap_segment != NULL); uint8_t* end_address = heap_segment_allocated (current_heap_segment); size_t end_brick = brick_of (end_address - 1); relocate_args args; args.is_shortened = FALSE; args.pinned_plug_entry = 0; args.last_plug = 0; while (1) { if (current_brick > end_brick) { if (args.last_plug) { { assert (!(args.is_shortened)); relocate_survivors_in_plug (args.last_plug, heap_segment_allocated (current_heap_segment), args.is_shortened, args.pinned_plug_entry); } args.last_plug = 0; } heap_segment* next_heap_segment = heap_segment_next (current_heap_segment); if (next_heap_segment) { #ifdef USE_REGIONS next_heap_segment = relocate_advance_to_non_sip (next_heap_segment); #endif //USE_REGIONS if (next_heap_segment) { current_heap_segment = next_heap_segment; current_brick = brick_of (heap_segment_mem (current_heap_segment)); end_brick = brick_of (heap_segment_allocated (current_heap_segment)-1); continue; } else break; } else { break; } } { int brick_entry = brick_table [ current_brick ]; if (brick_entry >= 0) { relocate_survivors_in_brick (brick_address (current_brick) + brick_entry -1, &args); } } current_brick++; } } } void gc_heap::walk_plug (uint8_t* plug, size_t size, BOOL check_last_object_p, walk_relocate_args* args) { if (check_last_object_p) { size += sizeof (gap_reloc_pair); mark* entry = args->pinned_plug_entry; if (args->is_shortened) { assert (entry->has_post_plug_info()); entry->swap_post_plug_and_saved_for_profiler(); } else { assert (entry->has_pre_plug_info()); entry->swap_pre_plug_and_saved_for_profiler(); } } ptrdiff_t last_plug_relocation = node_relocation_distance (plug); STRESS_LOG_PLUG_MOVE(plug, (plug + size), -last_plug_relocation); ptrdiff_t reloc = settings.compaction ? 
last_plug_relocation : 0; (args->fn) (plug, (plug + size), reloc, args->profiling_context, !!settings.compaction, false); if (check_last_object_p) { mark* entry = args->pinned_plug_entry; if (args->is_shortened) { entry->swap_post_plug_and_saved_for_profiler(); } else { entry->swap_pre_plug_and_saved_for_profiler(); } } } void gc_heap::walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args) { assert ((tree != NULL)); if (node_left_child (tree)) { walk_relocation_in_brick (tree + node_left_child (tree), args); } uint8_t* plug = tree; BOOL has_pre_plug_info_p = FALSE; BOOL has_post_plug_info_p = FALSE; if (tree == oldest_pinned_plug) { args->pinned_plug_entry = get_oldest_pinned_entry (&has_pre_plug_info_p, &has_post_plug_info_p); assert (tree == pinned_plug (args->pinned_plug_entry)); } if (args->last_plug != 0) { size_t gap_size = node_gap_size (tree); uint8_t* gap = (plug - gap_size); uint8_t* last_plug_end = gap; size_t last_plug_size = (last_plug_end - args->last_plug); dprintf (3, ("tree: %Ix, last_plug: %Ix, gap: %Ix(%Ix), last_plug_end: %Ix, size: %Ix", tree, args->last_plug, gap, gap_size, last_plug_end, last_plug_size)); BOOL check_last_object_p = (args->is_shortened || has_pre_plug_info_p); if (!check_last_object_p) { assert (last_plug_size >= Align (min_obj_size)); } walk_plug (args->last_plug, last_plug_size, check_last_object_p, args); } else { assert (!has_pre_plug_info_p); } dprintf (3, ("set args last plug to plug: %Ix", plug)); args->last_plug = plug; args->is_shortened = has_post_plug_info_p; if (node_right_child (tree)) { walk_relocation_in_brick (tree + node_right_child (tree), args); } } void gc_heap::walk_relocation (void* profiling_context, record_surv_fn fn) { int condemned_gen_number = settings.condemned_generation; int stop_gen_idx = get_stop_generation_index (condemned_gen_number); for (int i = condemned_gen_number; i >= stop_gen_idx; i--) { generation* condemned_gen = generation_of (i); heap_segment* current_heap_segment = heap_segment_rw (generation_start_segment (condemned_gen)); uint8_t* start_address = get_soh_start_object (current_heap_segment, condemned_gen); size_t current_brick = brick_of (start_address); PREFIX_ASSUME(current_heap_segment != NULL); reset_pinned_queue_bos(); update_oldest_pinned_plug(); size_t end_brick = brick_of (heap_segment_allocated (current_heap_segment)-1); walk_relocate_args args; args.is_shortened = FALSE; args.pinned_plug_entry = 0; args.last_plug = 0; args.profiling_context = profiling_context; args.fn = fn; while (1) { if (current_brick > end_brick) { if (args.last_plug) { walk_plug (args.last_plug, (heap_segment_allocated (current_heap_segment) - args.last_plug), args.is_shortened, &args); args.last_plug = 0; } if (heap_segment_next_rw (current_heap_segment)) { current_heap_segment = heap_segment_next_rw (current_heap_segment); current_brick = brick_of (heap_segment_mem (current_heap_segment)); end_brick = brick_of (heap_segment_allocated (current_heap_segment)-1); continue; } else { break; } } { int brick_entry = brick_table [ current_brick ]; if (brick_entry >= 0) { walk_relocation_in_brick (brick_address (current_brick) + brick_entry - 1, &args); } } current_brick++; } } } void gc_heap::walk_survivors (record_surv_fn fn, void* context, walk_surv_type type) { if (type == walk_for_gc) walk_survivors_relocation (context, fn); #if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE) else if (type == walk_for_bgc) walk_survivors_for_bgc (context, fn); #endif //BACKGROUND_GC && FEATURE_EVENT_TRACE else assert 
(!"unknown type!"); } #if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE) void gc_heap::walk_survivors_for_bgc (void* profiling_context, record_surv_fn fn) { assert(settings.concurrent); for (int i = get_start_generation_index(); i < total_generation_count; i++) { int align_const = get_alignment_constant (i == max_generation); heap_segment* seg = heap_segment_rw (generation_start_segment (generation_of (i))); while (seg) { uint8_t* o = heap_segment_mem (seg); uint8_t* end = heap_segment_allocated (seg); while (o < end) { if (method_table(o) == g_gc_pFreeObjectMethodTable) { o += Align (size (o), align_const); continue; } // It's survived. Make a fake plug, starting at o, // and send the event uint8_t* plug_start = o; while (method_table(o) != g_gc_pFreeObjectMethodTable) { o += Align (size (o), align_const); if (o >= end) { break; } } uint8_t* plug_end = o; fn (plug_start, plug_end, 0, // Reloc distance == 0 as this is non-compacting profiling_context, false, // Non-compacting true); // BGC } seg = heap_segment_next (seg); } } } #endif //BACKGROUND_GC && FEATURE_EVENT_TRACE void gc_heap::relocate_phase (int condemned_gen_number, uint8_t* first_condemned_address) { ScanContext sc; sc.thread_number = heap_number; sc.promotion = FALSE; sc.concurrent = FALSE; #ifdef MULTIPLE_HEAPS //join all threads to make sure they are synchronized dprintf(3, ("Joining after end of plan")); gc_t_join.join(this, gc_join_begin_relocate_phase); if (gc_t_join.joined()) { #endif //MULTIPLE_HEAPS #ifdef FEATURE_EVENT_TRACE if (informational_event_enabled_p) { gc_time_info[time_relocate] = GetHighPrecisionTimeStamp(); } #endif //FEATURE_EVENT_TRACE #ifdef MULTIPLE_HEAPS //join all threads to make sure they are synchronized dprintf(3, ("Restarting for relocation")); gc_t_join.restart(); } #endif //MULTIPLE_HEAPS dprintf (2, (ThreadStressLog::gcStartRelocateMsg(), heap_number)); dprintf(3,("Relocating roots")); GCScan::GcScanRoots(GCHeap::Relocate, condemned_gen_number, max_generation, &sc); verify_pins_with_post_plug_info("after reloc stack"); #ifdef BACKGROUND_GC if (gc_heap::background_running_p()) { scan_background_roots (GCHeap::Relocate, heap_number, &sc); } #endif //BACKGROUND_GC #ifdef FEATURE_CARD_MARKING_STEALING // for card marking stealing, do the other relocations *before* we scan the older generations // this gives us a chance to make up for imbalance in these phases later { dprintf(3, ("Relocating survivors")); relocate_survivors(condemned_gen_number, first_condemned_address); } #ifdef FEATURE_PREMORTEM_FINALIZATION dprintf(3, ("Relocating finalization data")); finalize_queue->RelocateFinalizationData(condemned_gen_number, __this); #endif // FEATURE_PREMORTEM_FINALIZATION { dprintf(3, ("Relocating handle table")); GCScan::GcScanHandles(GCHeap::Relocate, condemned_gen_number, max_generation, &sc); } #endif // FEATURE_CARD_MARKING_STEALING if (condemned_gen_number != max_generation) { #if defined(MULTIPLE_HEAPS) && defined(FEATURE_CARD_MARKING_STEALING) if (!card_mark_done_soh) #endif // MULTIPLE_HEAPS && FEATURE_CARD_MARKING_STEALING { dprintf (3, ("Relocating cross generation pointers on heap %d", heap_number)); mark_through_cards_for_segments(&gc_heap::relocate_address, TRUE THIS_ARG); verify_pins_with_post_plug_info("after reloc cards"); #if defined(MULTIPLE_HEAPS) && defined(FEATURE_CARD_MARKING_STEALING) card_mark_done_soh = true; #endif // MULTIPLE_HEAPS && FEATURE_CARD_MARKING_STEALING } } if (condemned_gen_number != max_generation) { #if defined(MULTIPLE_HEAPS) && 
defined(FEATURE_CARD_MARKING_STEALING) if (!card_mark_done_uoh) #endif // MULTIPLE_HEAPS && FEATURE_CARD_MARKING_STEALING { dprintf (3, ("Relocating cross generation pointers for uoh objects on heap %d", heap_number)); for (int i = uoh_start_generation; i < total_generation_count; i++) { #ifndef ALLOW_REFERENCES_IN_POH if (i != poh_generation) #endif //ALLOW_REFERENCES_IN_POH mark_through_cards_for_uoh_objects(&gc_heap::relocate_address, i, TRUE THIS_ARG); } #if defined(MULTIPLE_HEAPS) && defined(FEATURE_CARD_MARKING_STEALING) card_mark_done_uoh = true; #endif // MULTIPLE_HEAPS && FEATURE_CARD_MARKING_STEALING } } else { #ifdef FEATURE_LOH_COMPACTION if (loh_compacted_p) { assert (settings.condemned_generation == max_generation); relocate_in_loh_compact(); } else #endif //FEATURE_LOH_COMPACTION { relocate_in_uoh_objects (loh_generation); } #ifdef ALLOW_REFERENCES_IN_POH relocate_in_uoh_objects (poh_generation); #endif } #ifndef FEATURE_CARD_MARKING_STEALING // moved this code *before* we scan the older generations via mark_through_cards_xxx // this gives us a chance to have mark_through_cards_xxx make up for imbalance in the other relocations { dprintf(3,("Relocating survivors")); relocate_survivors (condemned_gen_number, first_condemned_address); } #ifdef FEATURE_PREMORTEM_FINALIZATION dprintf(3,("Relocating finalization data")); finalize_queue->RelocateFinalizationData (condemned_gen_number, __this); #endif // FEATURE_PREMORTEM_FINALIZATION { dprintf(3,("Relocating handle table")); GCScan::GcScanHandles(GCHeap::Relocate, condemned_gen_number, max_generation, &sc); } #endif // !FEATURE_CARD_MARKING_STEALING #if defined(MULTIPLE_HEAPS) && defined(FEATURE_CARD_MARKING_STEALING) if (condemned_gen_number != max_generation) { // check the other heaps cyclically and try to help out where the relocation isn't done for (int i = 0; i < gc_heap::n_heaps; i++) { int heap_number_to_look_at = (i + heap_number) % gc_heap::n_heaps; gc_heap* hp = gc_heap::g_heaps[heap_number_to_look_at]; if (!hp->card_mark_done_soh) { dprintf(3, ("Relocating cross generation pointers on heap %d", hp->heap_number)); hp->mark_through_cards_for_segments(&gc_heap::relocate_address, TRUE THIS_ARG); hp->card_mark_done_soh = true; } if (!hp->card_mark_done_uoh) { dprintf(3, ("Relocating cross generation pointers for uoh objects on heap %d", hp->heap_number)); for (int i = uoh_start_generation; i < total_generation_count; i++) { #ifndef ALLOW_REFERENCES_IN_POH if (i != poh_generation) #endif //ALLOW_REFERENCES_IN_POH hp->mark_through_cards_for_uoh_objects(&gc_heap::relocate_address, i, TRUE THIS_ARG); } hp->card_mark_done_uoh = true; } } } #endif // MULTIPLE_HEAPS && FEATURE_CARD_MARKING_STEALING dprintf(2, (ThreadStressLog::gcEndRelocateMsg(), heap_number)); } // This compares to see if tree is the current pinned plug and returns info // for this pinned plug. Also advances the pinned queue if that's the case. // // We don't change the values of the plug info if tree is not the same as // the current pinned plug - the caller is responsible for setting the right // values to begin with. // // POPO TODO: We are keeping this temporarily as this is also used by realloc // where it passes FALSE to deque_p, change it to use the same optimization // as relocate. Not as essential since realloc is already a slow path. 
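//
// Note: unlike get_oldest_pinned_entry below, this returns NULL when tree is not the oldest
// pinned plug (or the pin queue is empty), and it only dequeues the pin queue when deque_p is
// TRUE - the realloc path passes FALSE so the queue is left untouched.
//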
mark* gc_heap::get_next_pinned_entry (uint8_t* tree,
                                      BOOL* has_pre_plug_info_p,
                                      BOOL* has_post_plug_info_p,
                                      BOOL deque_p)
{
    if (!pinned_plug_que_empty_p())
    {
        mark* oldest_entry = oldest_pin();
        uint8_t* oldest_plug = pinned_plug (oldest_entry);
        if (tree == oldest_plug)
        {
            *has_pre_plug_info_p = oldest_entry->has_pre_plug_info();
            *has_post_plug_info_p = oldest_entry->has_post_plug_info();
            if (deque_p)
            {
                deque_pinned_plug();
            }
            dprintf (3, ("found a pinned plug %Ix, pre: %d, post: %d",
                tree, (*has_pre_plug_info_p ? 1 : 0), (*has_post_plug_info_p ? 1 : 0)));
            return oldest_entry;
        }
    }
    return NULL;
}

// This also deques the oldest entry and updates the oldest plug
mark* gc_heap::get_oldest_pinned_entry (BOOL* has_pre_plug_info_p,
                                        BOOL* has_post_plug_info_p)
{
    mark* oldest_entry = oldest_pin();
    *has_pre_plug_info_p = oldest_entry->has_pre_plug_info();
    *has_post_plug_info_p = oldest_entry->has_post_plug_info();
    deque_pinned_plug();
    update_oldest_pinned_plug();
    return oldest_entry;
}

inline
void gc_heap::copy_cards_range (uint8_t* dest, uint8_t* src, size_t len, BOOL copy_cards_p)
{
    if (copy_cards_p)
        copy_cards_for_addresses (dest, src, len);
    else
        clear_card_for_addresses (dest, dest + len);
}

// POPO TODO: We should actually just recover the artificially made gaps here, because when we copy
// we always copy the earlier plugs first which means we won't need the gap sizes anymore. This way
// we won't need to individually recover each overwritten part of plugs.
inline
void gc_heap::gcmemcopy (uint8_t* dest, uint8_t* src, size_t len, BOOL copy_cards_p)
{
    if (dest != src)
    {
#ifdef BACKGROUND_GC
        if (current_c_gc_state == c_gc_state_marking)
        {
            //TODO: should look to see whether we should consider changing this
            // to copy a consecutive region of the mark array instead.
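            // Note: while background marking is in progress, the mark_array bits covering the moved
            // range are copied along with the object data (copy_mark_bits_for_addresses below), so a
            // plug that was already background-marked stays marked at its new address.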
copy_mark_bits_for_addresses (dest, src, len); } #endif //BACKGROUND_GC #ifdef DOUBLY_LINKED_FL BOOL set_bgc_mark_bits_p = is_plug_bgc_mark_bit_set (src); if (set_bgc_mark_bits_p) { clear_plug_bgc_mark_bit (src); } BOOL make_free_obj_p = FALSE; if (len <= min_free_item_no_prev) { make_free_obj_p = is_free_obj_in_compact_bit_set (src); if (make_free_obj_p) { clear_free_obj_in_compact_bit (src); } } #endif //DOUBLY_LINKED_FL //dprintf(3,(" Memcopy [%Ix->%Ix, %Ix->%Ix[", (size_t)src, (size_t)dest, (size_t)src+len, (size_t)dest+len)); dprintf(3,(ThreadStressLog::gcMemCopyMsg(), (size_t)src, (size_t)dest, (size_t)src+len, (size_t)dest+len)); memcopy (dest - plug_skew, src - plug_skew, len); #ifdef DOUBLY_LINKED_FL if (set_bgc_mark_bits_p) { uint8_t* dest_o = dest; uint8_t* dest_end_o = dest + len; while (dest_o < dest_end_o) { uint8_t* next_o = dest_o + Align (size (dest_o)); background_mark (dest_o, background_saved_lowest_address, background_saved_highest_address); dest_o = next_o; } dprintf (3333, ("[h%d] GM: %Ix(%Ix-%Ix)->%Ix(%Ix-%Ix)", heap_number, dest, (size_t)(&mark_array [mark_word_of (dest)]), (size_t)(mark_array [mark_word_of (dest)]), dest_end_o, (size_t)(&mark_array [mark_word_of (dest_o)]), (size_t)(mark_array [mark_word_of (dest_o)]))); } if (make_free_obj_p) { size_t* filler_free_obj_size_location = (size_t*)(dest + min_free_item_no_prev); size_t filler_free_obj_size = *filler_free_obj_size_location; make_unused_array ((dest + len), filler_free_obj_size); dprintf (3333, ("[h%d] smallobj, %Ix(%Id): %Ix->%Ix", heap_number, filler_free_obj_size_location, filler_free_obj_size, (dest + len), (dest + len + filler_free_obj_size))); } #endif //DOUBLY_LINKED_FL #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP if (SoftwareWriteWatch::IsEnabledForGCHeap()) { // The ranges [src - plug_kew .. src[ and [src + len - plug_skew .. src + len[ are ObjHeaders, which don't have GC // references, and are not relevant for write watch. The latter range actually corresponds to the ObjHeader for the // object at (src + len), so it can be ignored anyway. SoftwareWriteWatch::SetDirtyRegion(dest, len - plug_skew); } #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP copy_cards_range (dest, src, len, copy_cards_p); } } void gc_heap::compact_plug (uint8_t* plug, size_t size, BOOL check_last_object_p, compact_args* args) { args->print(); uint8_t* reloc_plug = plug + args->last_plug_relocation; if (check_last_object_p) { size += sizeof (gap_reloc_pair); mark* entry = args->pinned_plug_entry; if (args->is_shortened) { assert (entry->has_post_plug_info()); entry->swap_post_plug_and_saved(); } else { assert (entry->has_pre_plug_info()); entry->swap_pre_plug_and_saved(); } } int old_brick_entry = brick_table [brick_of (plug)]; assert (node_relocation_distance (plug) == args->last_plug_relocation); #ifdef FEATURE_STRUCTALIGN ptrdiff_t alignpad = node_alignpad(plug); if (alignpad) { make_unused_array (reloc_plug - alignpad, alignpad); if (brick_of (reloc_plug - alignpad) != brick_of (reloc_plug)) { // The alignment padding is straddling one or more bricks; // it has to be the last "object" of its first brick. 
fix_brick_to_highest (reloc_plug - alignpad, reloc_plug); } } #else // FEATURE_STRUCTALIGN size_t unused_arr_size = 0; BOOL already_padded_p = FALSE; #ifdef SHORT_PLUGS if (is_plug_padded (plug)) { already_padded_p = TRUE; clear_plug_padded (plug); unused_arr_size = Align (min_obj_size); } #endif //SHORT_PLUGS if (node_realigned (plug)) { unused_arr_size += switch_alignment_size (already_padded_p); } if (unused_arr_size != 0) { make_unused_array (reloc_plug - unused_arr_size, unused_arr_size); if (brick_of (reloc_plug - unused_arr_size) != brick_of (reloc_plug)) { dprintf (3, ("fix B for padding: %Id: %Ix->%Ix", unused_arr_size, (reloc_plug - unused_arr_size), reloc_plug)); // The alignment padding is straddling one or more bricks; // it has to be the last "object" of its first brick. fix_brick_to_highest (reloc_plug - unused_arr_size, reloc_plug); } } #endif // FEATURE_STRUCTALIGN #ifdef SHORT_PLUGS if (is_plug_padded (plug)) { make_unused_array (reloc_plug - Align (min_obj_size), Align (min_obj_size)); if (brick_of (reloc_plug - Align (min_obj_size)) != brick_of (reloc_plug)) { // The alignment padding is straddling one or more bricks; // it has to be the last "object" of its first brick. fix_brick_to_highest (reloc_plug - Align (min_obj_size), reloc_plug); } } #endif //SHORT_PLUGS gcmemcopy (reloc_plug, plug, size, args->copy_cards_p); if (args->check_gennum_p) { int src_gennum = args->src_gennum; if (src_gennum == -1) { src_gennum = object_gennum (plug); } int dest_gennum = object_gennum_plan (reloc_plug); if (src_gennum < dest_gennum) { generation_allocation_size (generation_of (dest_gennum)) += size; } } size_t current_reloc_brick = args->current_compacted_brick; if (brick_of (reloc_plug) != current_reloc_brick) { dprintf (3, ("last reloc B: %Ix, current reloc B: %Ix", current_reloc_brick, brick_of (reloc_plug))); if (args->before_last_plug) { dprintf (3,(" fixing last brick %Ix to point to last plug %Ix(%Ix)", current_reloc_brick, args->before_last_plug, (args->before_last_plug - brick_address (current_reloc_brick)))); { set_brick (current_reloc_brick, args->before_last_plug - brick_address (current_reloc_brick)); } } current_reloc_brick = brick_of (reloc_plug); } size_t end_brick = brick_of (reloc_plug + size-1); if (end_brick != current_reloc_brick) { // The plug is straddling one or more bricks // It has to be the last plug of its first brick dprintf (3,("plug spanning multiple bricks, fixing first brick %Ix to %Ix(%Ix)", current_reloc_brick, (size_t)reloc_plug, (reloc_plug - brick_address (current_reloc_brick)))); { set_brick (current_reloc_brick, reloc_plug - brick_address (current_reloc_brick)); } // update all intervening brick size_t brick = current_reloc_brick + 1; dprintf (3,("setting intervening bricks %Ix->%Ix to -1", brick, (end_brick - 1))); while (brick < end_brick) { set_brick (brick, -1); brick++; } // code last brick offset as a plug address args->before_last_plug = brick_address (end_brick) -1; current_reloc_brick = end_brick; dprintf (3, ("setting before last to %Ix, last brick to %Ix", args->before_last_plug, current_reloc_brick)); } else { dprintf (3, ("still in the same brick: %Ix", end_brick)); args->before_last_plug = reloc_plug; } args->current_compacted_brick = current_reloc_brick; if (check_last_object_p) { mark* entry = args->pinned_plug_entry; if (args->is_shortened) { entry->swap_post_plug_and_saved(); } else { entry->swap_pre_plug_and_saved(); } } } void gc_heap::compact_in_brick (uint8_t* tree, compact_args* args) { assert (tree != NULL); int 
left_node = node_left_child (tree); int right_node = node_right_child (tree); ptrdiff_t relocation = node_relocation_distance (tree); args->print(); if (left_node) { dprintf (3, ("B: L: %d->%Ix", left_node, (tree + left_node))); compact_in_brick ((tree + left_node), args); } uint8_t* plug = tree; BOOL has_pre_plug_info_p = FALSE; BOOL has_post_plug_info_p = FALSE; if (tree == oldest_pinned_plug) { args->pinned_plug_entry = get_oldest_pinned_entry (&has_pre_plug_info_p, &has_post_plug_info_p); assert (tree == pinned_plug (args->pinned_plug_entry)); } if (args->last_plug != 0) { size_t gap_size = node_gap_size (tree); uint8_t* gap = (plug - gap_size); uint8_t* last_plug_end = gap; size_t last_plug_size = (last_plug_end - args->last_plug); assert ((last_plug_size & (sizeof(PTR_PTR) - 1)) == 0); dprintf (3, ("tree: %Ix, last_plug: %Ix, gap: %Ix(%Ix), last_plug_end: %Ix, size: %Ix", tree, args->last_plug, gap, gap_size, last_plug_end, last_plug_size)); BOOL check_last_object_p = (args->is_shortened || has_pre_plug_info_p); if (!check_last_object_p) { assert (last_plug_size >= Align (min_obj_size)); } compact_plug (args->last_plug, last_plug_size, check_last_object_p, args); } else { assert (!has_pre_plug_info_p); } dprintf (3, ("set args last plug to plug: %Ix, reloc: %Ix", plug, relocation)); args->last_plug = plug; args->last_plug_relocation = relocation; args->is_shortened = has_post_plug_info_p; if (right_node) { dprintf (3, ("B: R: %d->%Ix", right_node, (tree + right_node))); compact_in_brick ((tree + right_node), args); } } // This returns the recovered size for gen2 plugs as that's what we need // mostly - would be nice to make it work for all generations. size_t gc_heap::recover_saved_pinned_info() { reset_pinned_queue_bos(); size_t total_recovered_sweep_size = 0; while (!(pinned_plug_que_empty_p())) { mark* oldest_entry = oldest_pin(); size_t recovered_sweep_size = oldest_entry->recover_plug_info(); if (recovered_sweep_size > 0) { uint8_t* plug = pinned_plug (oldest_entry); if (object_gennum (plug) == max_generation) { dprintf (3, ("recovered %Ix(%Id) from pin", plug, recovered_sweep_size)); total_recovered_sweep_size += recovered_sweep_size; } } #ifdef GC_CONFIG_DRIVEN if (oldest_entry->has_pre_plug_info() && oldest_entry->has_post_plug_info()) record_interesting_data_point (idp_pre_and_post_pin); else if (oldest_entry->has_pre_plug_info()) record_interesting_data_point (idp_pre_pin); else if (oldest_entry->has_post_plug_info()) record_interesting_data_point (idp_post_pin); #endif //GC_CONFIG_DRIVEN deque_pinned_plug(); } return total_recovered_sweep_size; } void gc_heap::compact_phase (int condemned_gen_number, uint8_t* first_condemned_address, BOOL clear_cards) { #ifdef MULTIPLE_HEAPS dprintf(3, ("Joining after end of relocation")); gc_t_join.join(this, gc_join_relocate_phase_done); if (gc_t_join.joined()) #endif //MULTIPLE_HEAPS { #ifdef FEATURE_EVENT_TRACE if (informational_event_enabled_p) { gc_time_info[time_compact] = GetHighPrecisionTimeStamp(); gc_time_info[time_relocate] = gc_time_info[time_compact] - gc_time_info[time_relocate]; } #endif //FEATURE_EVENT_TRACE #ifdef MULTIPLE_HEAPS dprintf(3, ("Restarting for compaction")); gc_t_join.restart(); #endif //MULTIPLE_HEAPS } dprintf (2, (ThreadStressLog::gcStartCompactMsg(), heap_number, first_condemned_address, brick_of (first_condemned_address))); #ifdef FEATURE_LOH_COMPACTION if (loh_compacted_p) { compact_loh(); } #endif //FEATURE_LOH_COMPACTION reset_pinned_queue_bos(); update_oldest_pinned_plug(); BOOL reused_seg = 
expand_reused_seg_p(); if (reused_seg) { for (int i = 1; i <= max_generation; i++) { generation_allocation_size (generation_of (i)) = 0; } } int stop_gen_idx = get_stop_generation_index (condemned_gen_number); for (int i = condemned_gen_number; i >= stop_gen_idx; i--) { generation* condemned_gen = generation_of (i); heap_segment* current_heap_segment = get_start_segment (condemned_gen); #ifdef USE_REGIONS if (!current_heap_segment) continue; size_t current_brick = brick_of (heap_segment_mem (current_heap_segment)); #else size_t current_brick = brick_of (first_condemned_address); #endif //USE_REGIONS uint8_t* end_address = heap_segment_allocated (current_heap_segment); #ifndef USE_REGIONS if ((first_condemned_address >= end_address) && (condemned_gen_number < max_generation)) { return; } #endif //!USE_REGIONS size_t end_brick = brick_of (end_address-1); compact_args args; args.last_plug = 0; args.before_last_plug = 0; args.current_compacted_brick = ~((size_t)1); args.is_shortened = FALSE; args.pinned_plug_entry = 0; args.copy_cards_p = (condemned_gen_number >= 1) || !clear_cards; args.check_gennum_p = reused_seg; if (args.check_gennum_p) { args.src_gennum = ((current_heap_segment == ephemeral_heap_segment) ? -1 : 2); } #ifdef USE_REGIONS assert (!args.check_gennum_p); #endif //USE_REGIONS while (1) { if (current_brick > end_brick) { if (args.last_plug != 0) { dprintf (3, ("compacting last plug: %Ix", args.last_plug)) compact_plug (args.last_plug, (heap_segment_allocated (current_heap_segment) - args.last_plug), args.is_shortened, &args); } heap_segment* next_heap_segment = heap_segment_next_non_sip (current_heap_segment); if (next_heap_segment) { current_heap_segment = next_heap_segment; current_brick = brick_of (heap_segment_mem (current_heap_segment)); end_brick = brick_of (heap_segment_allocated (current_heap_segment)-1); args.last_plug = 0; if (args.check_gennum_p) { args.src_gennum = ((current_heap_segment == ephemeral_heap_segment) ? -1 : 2); } continue; } else { if (args.before_last_plug !=0) { dprintf (3, ("Fixing last brick %Ix to point to plug %Ix", args.current_compacted_brick, (size_t)args.before_last_plug)); assert (args.current_compacted_brick != ~1u); set_brick (args.current_compacted_brick, args.before_last_plug - brick_address (args.current_compacted_brick)); } break; } } { int brick_entry = brick_table [ current_brick ]; dprintf (3, ("B: %Ix(%Ix)->%Ix", current_brick, (size_t)brick_entry, (brick_address (current_brick) + brick_entry - 1))); if (brick_entry >= 0) { compact_in_brick ((brick_address (current_brick) + brick_entry -1), &args); } } current_brick++; } } recover_saved_pinned_info(); concurrent_print_time_delta ("compact end"); dprintf (2, (ThreadStressLog::gcEndCompactMsg(), heap_number)); } #ifdef MULTIPLE_HEAPS #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable:4702) // C4702: unreachable code: gc_thread_function may not return #endif //_MSC_VER void gc_heap::gc_thread_stub (void* arg) { gc_heap* heap = (gc_heap*)arg; if (!gc_thread_no_affinitize_p) { // We are about to set affinity for GC threads. It is a good place to set up NUMA and // CPU groups because the process mask, processor number, and group number are all // readily available. set_thread_affinity_for_heap (heap->heap_number, heap_select::find_proc_no_from_heap_no (heap->heap_number)); } // server GC threads run at a higher priority than normal. 
GCToOSInterface::BoostThreadPriority(); void* tmp = _alloca (256*heap->heap_number); heap->gc_thread_function(); } #ifdef _MSC_VER #pragma warning(pop) #endif //_MSC_VER #endif //MULTIPLE_HEAPS #ifdef BACKGROUND_GC #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable:4702) // C4702: unreachable code: gc_thread_function may not return #endif //_MSC_VER void gc_heap::bgc_thread_stub (void* arg) { gc_heap* heap = (gc_heap*)arg; heap->bgc_thread = GCToEEInterface::GetThread(); assert(heap->bgc_thread != nullptr); heap->bgc_thread_function(); } #ifdef _MSC_VER #pragma warning(pop) #endif //_MSC_VER void gc_heap::background_drain_mark_list (int thread) { #ifndef MULTIPLE_HEAPS UNREFERENCED_PARAMETER(thread); #endif //!MULTIPLE_HEAPS size_t saved_c_mark_list_index = c_mark_list_index; if (saved_c_mark_list_index) { concurrent_print_time_delta ("SML"); } while (c_mark_list_index != 0) { size_t current_index = c_mark_list_index - 1; uint8_t* o = c_mark_list [current_index]; background_mark_object (o THREAD_NUMBER_ARG); c_mark_list_index--; } if (saved_c_mark_list_index) { concurrent_print_time_delta ("EML"); } fire_drain_mark_list_event (saved_c_mark_list_index); } // The background GC version of scan_dependent_handles (see that method for a more in-depth comment). #ifdef MULTIPLE_HEAPS // Since we only scan dependent handles while we are stopped we'll never interfere with FGCs scanning // them. So we can use the same static variables. void gc_heap::background_scan_dependent_handles (ScanContext *sc) { // Whenever we call this method there may have been preceding object promotions. So set // s_fUnscannedPromotions unconditionally (during further iterations of the scanning loop this will be set // based on the how the scanning proceeded). s_fUnscannedPromotions = TRUE; // We don't know how many times we need to loop yet. In particular we can't base the loop condition on // the state of this thread's portion of the dependent handle table. That's because promotions on other // threads could cause handle promotions to become necessary here. Even if there are definitely no more // promotions possible in this thread's handles, we still have to stay in lock-step with those worker // threads that haven't finished yet (each GC worker thread has to join exactly the same number of times // as all the others or they'll get out of step). while (true) { // The various worker threads are all currently racing in this code. We need to work out if at least // one of them think they have work to do this cycle. Each thread needs to rescan its portion of the // dependent handle table when both of the following conditions apply: // 1) At least one (arbitrary) object might have been promoted since the last scan (because if this // object happens to correspond to a primary in one of our handles we might potentially have to // promote the associated secondary). // 2) The table for this thread has at least one handle with a secondary that isn't promoted yet. // // The first condition is represented by s_fUnscannedPromotions. This is always non-zero for the first // iteration of this loop (see comment above) and in subsequent cycles each thread updates this // whenever a mark stack overflow occurs or scanning their dependent handles results in a secondary // being promoted. This value is cleared back to zero in a synchronized fashion in the join that // follows below. 
Note that we can't read this outside of the join since on any iteration apart from // the first threads will be racing between reading this value and completing their previous // iteration's table scan. // // The second condition is tracked by the dependent handle code itself on a per worker thread basis // (and updated by the GcDhReScan() method). We call GcDhUnpromotedHandlesExist() on each thread to // determine the local value and collect the results into the s_fUnpromotedHandles variable in what is // effectively an OR operation. As per s_fUnscannedPromotions we can't read the final result until // we're safely joined. if (GCScan::GcDhUnpromotedHandlesExist(sc)) s_fUnpromotedHandles = TRUE; // Synchronize all the threads so we can read our state variables safely. The following shared // variable (indicating whether we should scan the tables or terminate the loop) will be set by a // single thread inside the join. bgc_t_join.join(this, gc_join_scan_dependent_handles); if (bgc_t_join.joined()) { // We're synchronized so it's safe to read our shared state variables. We update another shared // variable to indicate to all threads whether we'll be scanning for another cycle or terminating // the loop. We scan if there has been at least one object promotion since last time and at least // one thread has a dependent handle table with a potential handle promotion possible. s_fScanRequired = s_fUnscannedPromotions && s_fUnpromotedHandles; // Reset our shared state variables (ready to be set again on this scan or with a good initial // value for the next call if we're terminating the loop). s_fUnscannedPromotions = FALSE; s_fUnpromotedHandles = FALSE; if (!s_fScanRequired) { #ifndef USE_REGIONS uint8_t* all_heaps_max = 0; uint8_t* all_heaps_min = MAX_PTR; int i; for (i = 0; i < n_heaps; i++) { if (all_heaps_max < g_heaps[i]->background_max_overflow_address) all_heaps_max = g_heaps[i]->background_max_overflow_address; if (all_heaps_min > g_heaps[i]->background_min_overflow_address) all_heaps_min = g_heaps[i]->background_min_overflow_address; } for (i = 0; i < n_heaps; i++) { g_heaps[i]->background_max_overflow_address = all_heaps_max; g_heaps[i]->background_min_overflow_address = all_heaps_min; } #endif //!USE_REGIONS } dprintf(2, ("Starting all gc thread mark stack overflow processing")); bgc_t_join.restart(); } // Handle any mark stack overflow: scanning dependent handles relies on all previous object promotions // being visible. If there really was an overflow (process_mark_overflow returns true) then set the // global flag indicating that at least one object promotion may have occurred (the usual comment // about races applies). (Note it's OK to set this flag even if we're about to terminate the loop and // exit the method since we unconditionally set this variable on method entry anyway). if (background_process_mark_overflow (sc->concurrent)) s_fUnscannedPromotions = TRUE; // If we decided that no scan was required we can terminate the loop now. if (!s_fScanRequired) break; // Otherwise we must join with the other workers to ensure that all mark stack overflows have been // processed before we start scanning dependent handle tables (if overflows remain while we scan we // could miss noting the promotion of some primary objects). 
        bgc_t_join.join(this, gc_join_rescan_dependent_handles);
        if (bgc_t_join.joined())
        {
            dprintf(3, ("Starting all gc thread for dependent handle promotion"));
            bgc_t_join.restart();
        }

        // If the portion of the dependent handle table managed by this worker has handles that could still be
        // promoted perform a rescan. If the rescan resulted in at least one promotion note this fact since it
        // could require a rescan of handles on this or other workers.
        if (GCScan::GcDhUnpromotedHandlesExist(sc))
            if (GCScan::GcDhReScan(sc))
                s_fUnscannedPromotions = TRUE;
    }
}
#else
void gc_heap::background_scan_dependent_handles (ScanContext *sc)
{
    // Whenever we call this method there may have been preceding object promotions. So set
    // fUnscannedPromotions unconditionally (during further iterations of the scanning loop this will be set
    // based on how the scanning proceeded).
    bool fUnscannedPromotions = true;

    // Scan dependent handles repeatedly until there are no further promotions that can be made or we made a
    // scan without performing any new promotions.
    while (GCScan::GcDhUnpromotedHandlesExist(sc) && fUnscannedPromotions)
    {
        // On each iteration of the loop start with the assumption that no further objects have been promoted.
        fUnscannedPromotions = false;

        // Handle any mark stack overflow: scanning dependent handles relies on all previous object promotions
        // being visible. If there was an overflow (background_process_mark_overflow returned true) then
        // additional objects now appear to be promoted and we should set the flag.
        if (background_process_mark_overflow (sc->concurrent))
            fUnscannedPromotions = true;

        // Perform the scan and set the flag if any promotions resulted.
        if (GCScan::GcDhReScan (sc))
            fUnscannedPromotions = true;
    }

    // Perform a last processing of any overflowed mark stack.
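    // (Any promotions made by the final GcDhReScan above may themselves have overflowed the mark
    // stack, so one more overflow pass is needed before the scan can be considered complete.)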
background_process_mark_overflow (sc->concurrent); } #endif //MULTIPLE_HEAPS void gc_heap::recover_bgc_settings() { if ((settings.condemned_generation < max_generation) && gc_heap::background_running_p()) { dprintf (2, ("restoring bgc settings")); settings = saved_bgc_settings; GCHeap::GcCondemnedGeneration = gc_heap::settings.condemned_generation; } } void gc_heap::allow_fgc() { assert (bgc_thread == GCToEEInterface::GetThread()); bool bToggleGC = false; if (g_fSuspensionPending > 0) { bToggleGC = GCToEEInterface::EnablePreemptiveGC(); if (bToggleGC) { GCToEEInterface::DisablePreemptiveGC(); } } } BOOL gc_heap::is_bgc_in_progress() { return (background_running_p() || (current_bgc_state == bgc_initialized)); } void gc_heap::clear_commit_flag() { for (int i = get_start_generation_index(); i < total_generation_count; i++) { generation* gen = generation_of (i); heap_segment* seg = heap_segment_in_range (generation_start_segment (gen)); while (seg) { if (seg->flags & heap_segment_flags_ma_committed) { seg->flags &= ~heap_segment_flags_ma_committed; } if (seg->flags & heap_segment_flags_ma_pcommitted) { seg->flags &= ~heap_segment_flags_ma_pcommitted; } seg = heap_segment_next (seg); } } } void gc_heap::clear_commit_flag_global() { #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { g_heaps[i]->clear_commit_flag(); } #else clear_commit_flag(); #endif //MULTIPLE_HEAPS } void gc_heap::verify_mark_array_cleared (uint8_t* begin, uint8_t* end, uint32_t* mark_array_addr) { #ifdef _DEBUG size_t markw = mark_word_of (begin); size_t markw_end = mark_word_of (end); while (markw < markw_end) { if (mark_array_addr[markw]) { uint8_t* addr = mark_word_address (markw); #ifdef USE_REGIONS heap_segment* region = region_of (addr); dprintf (1, ("The mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix, r: %Ix(%Ix)) were not cleared", markw, mark_array_addr[markw], addr, (size_t)region, heap_segment_mem (region))); #else dprintf (1, ("The mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared", markw, mark_array_addr[markw], addr)); #endif //USE_REGIONS FATAL_GC_ERROR(); } markw++; } #else // _DEBUG UNREFERENCED_PARAMETER(begin); UNREFERENCED_PARAMETER(end); UNREFERENCED_PARAMETER(mark_array_addr); #endif //_DEBUG } uint8_t* gc_heap::get_start_address (heap_segment* seg) { uint8_t* start = #ifdef USE_REGIONS heap_segment_mem (seg); #else (heap_segment_read_only_p(seg) ? 
heap_segment_mem (seg) : (uint8_t*)seg); #endif //USE_REGIONS return start; } BOOL gc_heap::commit_mark_array_new_seg (gc_heap* hp, heap_segment* seg, uint32_t* new_card_table, uint8_t* new_lowest_address) { uint8_t* start = get_start_address (seg); uint8_t* end = heap_segment_reserved (seg); uint8_t* lowest = hp->background_saved_lowest_address; uint8_t* highest = hp->background_saved_highest_address; uint8_t* commit_start = NULL; uint8_t* commit_end = NULL; size_t commit_flag = 0; if ((highest >= start) && (lowest <= end)) { if ((start >= lowest) && (end <= highest)) { dprintf (GC_TABLE_LOG, ("completely in bgc range: seg %Ix-%Ix, bgc: %Ix-%Ix", start, end, lowest, highest)); commit_flag = heap_segment_flags_ma_committed; } else { dprintf (GC_TABLE_LOG, ("partially in bgc range: seg %Ix-%Ix, bgc: %Ix-%Ix", start, end, lowest, highest)); commit_flag = heap_segment_flags_ma_pcommitted; } commit_start = max (lowest, start); commit_end = min (highest, end); if (!commit_mark_array_by_range (commit_start, commit_end, hp->mark_array)) { return FALSE; } if (new_card_table == 0) { new_card_table = g_gc_card_table; } if (hp->card_table != new_card_table) { if (new_lowest_address == 0) { new_lowest_address = g_gc_lowest_address; } uint32_t* ct = &new_card_table[card_word (gcard_of (new_lowest_address))]; uint32_t* ma = (uint32_t*)((uint8_t*)card_table_mark_array (ct) - size_mark_array_of (0, new_lowest_address)); dprintf (GC_TABLE_LOG, ("table realloc-ed: %Ix->%Ix, MA: %Ix->%Ix", hp->card_table, new_card_table, hp->mark_array, ma)); if (!commit_mark_array_by_range (commit_start, commit_end, ma)) { return FALSE; } } seg->flags |= commit_flag; } return TRUE; } BOOL gc_heap::commit_mark_array_by_range (uint8_t* begin, uint8_t* end, uint32_t* mark_array_addr) { size_t beg_word = mark_word_of (begin); size_t end_word = mark_word_of (align_on_mark_word (end)); uint8_t* commit_start = align_lower_page ((uint8_t*)&mark_array_addr[beg_word]); uint8_t* commit_end = align_on_page ((uint8_t*)&mark_array_addr[end_word]); size_t size = (size_t)(commit_end - commit_start); #ifdef SIMPLE_DPRINTF dprintf (GC_TABLE_LOG, ("range: %Ix->%Ix mark word: %Ix->%Ix(%Id), mark array: %Ix->%Ix(%Id), commit %Ix->%Ix(%Id)", begin, end, beg_word, end_word, (end_word - beg_word) * sizeof (uint32_t), &mark_array_addr[beg_word], &mark_array_addr[end_word], (size_t)(&mark_array_addr[end_word] - &mark_array_addr[beg_word]), commit_start, commit_end, size)); #endif //SIMPLE_DPRINTF if (virtual_commit (commit_start, size, gc_oh_num::none)) { // We can only verify the mark array is cleared from begin to end, the first and the last // page aren't necessarily all cleared 'cause they could be used by other segments or // card bundle. 
verify_mark_array_cleared (begin, end, mark_array_addr); return TRUE; } else { dprintf (GC_TABLE_LOG, ("failed to commit %Id bytes", (end_word - beg_word) * sizeof (uint32_t))); return FALSE; } } BOOL gc_heap::commit_mark_array_with_check (heap_segment* seg, uint32_t* new_mark_array_addr) { uint8_t* start = get_start_address (seg); uint8_t* end = heap_segment_reserved (seg); #ifdef MULTIPLE_HEAPS uint8_t* lowest = heap_segment_heap (seg)->background_saved_lowest_address; uint8_t* highest = heap_segment_heap (seg)->background_saved_highest_address; #else uint8_t* lowest = background_saved_lowest_address; uint8_t* highest = background_saved_highest_address; #endif //MULTIPLE_HEAPS if ((highest >= start) && (lowest <= end)) { start = max (lowest, start); end = min (highest, end); if (!commit_mark_array_by_range (start, end, new_mark_array_addr)) { return FALSE; } } return TRUE; } BOOL gc_heap::commit_mark_array_by_seg (heap_segment* seg, uint32_t* mark_array_addr) { dprintf (GC_TABLE_LOG, ("seg: %Ix->%Ix; MA: %Ix", seg, heap_segment_reserved (seg), mark_array_addr)); uint8_t* start = get_start_address (seg); return commit_mark_array_by_range (start, heap_segment_reserved (seg), mark_array_addr); } BOOL gc_heap::commit_mark_array_bgc_init() { dprintf (GC_TABLE_LOG, ("BGC init commit: lowest: %Ix, highest: %Ix, mark_array: %Ix", lowest_address, highest_address, mark_array)); for (int i = get_start_generation_index(); i < total_generation_count; i++) { generation* gen = generation_of (i); heap_segment* seg = heap_segment_in_range (generation_start_segment (gen)); while (seg) { dprintf (GC_TABLE_LOG, ("h%d gen%d seg: %Ix(%Ix-%Ix), flags: %Id", heap_number, i, seg, heap_segment_mem (seg), heap_segment_allocated (seg), seg->flags)); if (!(seg->flags & heap_segment_flags_ma_committed)) { // For ro segments they could always be only partially in range so we'd // be calling this at the beginning of every BGC. We are not making this // more efficient right now - ro segments are currently only used by redhawk. if (heap_segment_read_only_p (seg)) { if ((heap_segment_mem (seg) >= lowest_address) && (heap_segment_reserved (seg) <= highest_address)) { if (commit_mark_array_by_seg (seg, mark_array)) { seg->flags |= heap_segment_flags_ma_committed; } else { return FALSE; } } else { uint8_t* start = max (lowest_address, heap_segment_mem (seg)); uint8_t* end = min (highest_address, heap_segment_reserved (seg)); if (commit_mark_array_by_range (start, end, mark_array)) { seg->flags |= heap_segment_flags_ma_pcommitted; } else { return FALSE; } } } else { // For normal segments they are by design completely in range so just // commit the whole mark array for each seg. if (commit_mark_array_by_seg (seg, mark_array)) { if (seg->flags & heap_segment_flags_ma_pcommitted) { seg->flags &= ~heap_segment_flags_ma_pcommitted; } seg->flags |= heap_segment_flags_ma_committed; } else { return FALSE; } } } seg = heap_segment_next (seg); } } return TRUE; } // This function doesn't check the commit flag since it's for a new array - // the mark_array flag for these segments will remain the same. 
BOOL gc_heap::commit_new_mark_array (uint32_t* new_mark_array_addr) { dprintf (GC_TABLE_LOG, ("committing existing segs on MA %Ix", new_mark_array_addr)); for (int i = get_start_generation_index(); i < total_generation_count; i++) { generation* gen = generation_of (i); heap_segment* seg = heap_segment_in_range (generation_start_segment (gen)); while (seg) { if (!commit_mark_array_with_check (seg, new_mark_array_addr)) { return FALSE; } seg = heap_segment_next (seg); } } #ifdef MULTIPLE_HEAPS if (new_heap_segment) { if (!commit_mark_array_with_check (new_heap_segment, new_mark_array_addr)) { return FALSE; } } #endif //MULTIPLE_HEAPS return TRUE; } BOOL gc_heap::commit_new_mark_array_global (uint32_t* new_mark_array) { #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { if (!g_heaps[i]->commit_new_mark_array (new_mark_array)) { return FALSE; } } #else if (!commit_new_mark_array (new_mark_array)) { return FALSE; } #endif //MULTIPLE_HEAPS return TRUE; } void gc_heap::decommit_mark_array_by_seg (heap_segment* seg) { // if BGC is disabled (the finalize watchdog does this at shutdown), the mark array could have // been set to NULL. if (mark_array == NULL) { return; } dprintf (GC_TABLE_LOG, ("decommitting seg %Ix(%Ix), MA: %Ix", seg, seg->flags, mark_array)); size_t flags = seg->flags; if ((flags & heap_segment_flags_ma_committed) || (flags & heap_segment_flags_ma_pcommitted)) { uint8_t* start = get_start_address (seg); uint8_t* end = heap_segment_reserved (seg); if (flags & heap_segment_flags_ma_pcommitted) { start = max (lowest_address, start); end = min (highest_address, end); } size_t beg_word = mark_word_of (start); size_t end_word = mark_word_of (align_on_mark_word (end)); uint8_t* decommit_start = align_on_page ((uint8_t*)&mark_array[beg_word]); uint8_t* decommit_end = align_lower_page ((uint8_t*)&mark_array[end_word]); size_t size = (size_t)(decommit_end - decommit_start); #ifdef SIMPLE_DPRINTF dprintf (GC_TABLE_LOG, ("seg: %Ix mark word: %Ix->%Ix(%Id), mark array: %Ix->%Ix(%Id), decommit %Ix->%Ix(%Id)", seg, beg_word, end_word, (end_word - beg_word) * sizeof (uint32_t), &mark_array[beg_word], &mark_array[end_word], (size_t)(&mark_array[end_word] - &mark_array[beg_word]), decommit_start, decommit_end, size)); #endif //SIMPLE_DPRINTF if (decommit_start < decommit_end) { if (!virtual_decommit (decommit_start, size, gc_oh_num::none)) { dprintf (GC_TABLE_LOG, ("decommit on %Ix for %Id bytes failed", decommit_start, size)); assert (!"decommit failed"); } } dprintf (GC_TABLE_LOG, ("decommited [%Ix for address [%Ix", beg_word, seg)); } } bool gc_heap::should_update_end_mark_size() { return ((settings.condemned_generation == (max_generation - 1)) && (current_c_gc_state == c_gc_state_planning)); } void gc_heap::background_mark_phase () { verify_mark_array_cleared(); ScanContext sc; sc.thread_number = heap_number; sc.promotion = TRUE; sc.concurrent = FALSE; THREAD_FROM_HEAP; BOOL cooperative_mode = TRUE; #ifndef MULTIPLE_HEAPS const int thread = heap_number; #endif //!MULTIPLE_HEAPS dprintf(2,("-(GC%d)BMark-", VolatileLoad(&settings.gc_index))); assert (settings.concurrent); if (gen0_must_clear_bricks > 0) gen0_must_clear_bricks--; background_soh_alloc_count = 0; background_uoh_alloc_count = 0; bgc_overflow_count = 0; bpromoted_bytes (heap_number) = 0; static uint32_t num_sizedrefs = 0; #ifdef USE_REGIONS background_overflow_p = FALSE; #else background_min_overflow_address = MAX_PTR; background_max_overflow_address = 0; background_min_soh_overflow_address = MAX_PTR; 
background_max_soh_overflow_address = 0; #endif //USE_REGIONS processed_eph_overflow_p = FALSE; //set up the mark lists from g_mark_list assert (g_mark_list); mark_list = g_mark_list; //dont use the mark list for full gc //because multiple segments are more complex to handle and the list //is likely to overflow mark_list_end = &mark_list [0]; mark_list_index = &mark_list [0]; c_mark_list_index = 0; #ifndef MULTIPLE_HEAPS shigh = (uint8_t*) 0; slow = MAX_PTR; #endif //MULTIPLE_HEAPS generation* gen = generation_of (max_generation); dprintf(3,("BGC: stack marking")); sc.concurrent = TRUE; GCScan::GcScanRoots(background_promote_callback, max_generation, max_generation, &sc); dprintf(3,("BGC: finalization marking")); finalize_queue->GcScanRoots(background_promote_callback, heap_number, 0); size_t total_soh_size = generation_sizes (generation_of (max_generation)); size_t total_loh_size = generation_size (loh_generation); size_t total_poh_size = generation_size (poh_generation); bgc_begin_loh_size = total_loh_size; bgc_begin_poh_size = total_poh_size; bgc_loh_size_increased = 0; bgc_poh_size_increased = 0; background_soh_size_end_mark = 0; dprintf (GTC_LOG, ("BM: h%d: loh: %Id, soh: %Id, poh: %Id", heap_number, total_loh_size, total_soh_size, total_poh_size)); //concurrent_print_time_delta ("copying stack roots"); concurrent_print_time_delta ("CS"); FIRE_EVENT(BGC1stNonConEnd); #ifndef USE_REGIONS saved_overflow_ephemeral_seg = 0; #endif //!USE_REGIONS current_bgc_state = bgc_reset_ww; // we don't need a join here - just whichever thread that gets here // first can change the states and call restart_vm. // this is not true - we can't let the EE run when we are scanning stack. // since we now allow reset ww to run concurrently and have a join for it, // we can do restart ee on the 1st thread that got here. Make sure we handle the // sizedref handles correctly. #ifdef MULTIPLE_HEAPS bgc_t_join.join(this, gc_join_restart_ee); if (bgc_t_join.joined()) #endif //MULTIPLE_HEAPS { #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP // Resetting write watch for software write watch is pretty fast, much faster than for hardware write watch. Reset // can be done while the runtime is suspended or after the runtime is restarted, the preference was to reset while // the runtime is suspended. The reset for hardware write watch is done after the runtime is restarted below. concurrent_print_time_delta ("CRWW begin"); #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { g_heaps[i]->reset_write_watch (FALSE); } #else reset_write_watch (FALSE); #endif //MULTIPLE_HEAPS concurrent_print_time_delta ("CRWW"); #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP num_sizedrefs = GCToEEInterface::GetTotalNumSizedRefHandles(); // this c_write is not really necessary because restart_vm // has an instruction that will flush the cpu cache (interlocked // or whatever) but we don't want to rely on that. dprintf (GTC_LOG, ("setting cm_in_progress")); c_write (cm_in_progress, TRUE); assert (dont_restart_ee_p); dont_restart_ee_p = FALSE; restart_vm(); GCToOSInterface::YieldThread (0); #ifdef MULTIPLE_HEAPS dprintf(3, ("Starting all gc threads for gc")); bgc_t_join.restart(); #endif //MULTIPLE_HEAPS } #ifdef MULTIPLE_HEAPS bgc_t_join.join(this, gc_join_after_reset); if (bgc_t_join.joined()) #endif //MULTIPLE_HEAPS { disable_preemptive (true); #ifndef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP // When software write watch is enabled, resetting write watch is done while the runtime is // suspended above. 
The post-reset call to revisit_written_pages is only necessary for concurrent // reset_write_watch, to discard dirtied pages during the concurrent reset. #ifdef WRITE_WATCH concurrent_print_time_delta ("CRWW begin"); #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { g_heaps[i]->reset_write_watch (TRUE); } #else reset_write_watch (TRUE); #endif //MULTIPLE_HEAPS concurrent_print_time_delta ("CRWW"); #endif //WRITE_WATCH #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { g_heaps[i]->revisit_written_pages (TRUE, TRUE); } #else revisit_written_pages (TRUE, TRUE); #endif //MULTIPLE_HEAPS concurrent_print_time_delta ("CRW"); #endif // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { g_heaps[i]->current_bgc_state = bgc_mark_handles; } #else current_bgc_state = bgc_mark_handles; #endif //MULTIPLE_HEAPS current_c_gc_state = c_gc_state_marking; enable_preemptive (); #ifdef MULTIPLE_HEAPS dprintf(3, ("Joining BGC threads after resetting writewatch")); bgc_t_join.restart(); #endif //MULTIPLE_HEAPS } disable_preemptive (true); if (num_sizedrefs > 0) { GCScan::GcScanSizedRefs(background_promote, max_generation, max_generation, &sc); enable_preemptive (); #ifdef MULTIPLE_HEAPS bgc_t_join.join(this, gc_join_scan_sizedref_done); if (bgc_t_join.joined()) { dprintf(3, ("Done with marking all sized refs. Starting all bgc thread for marking other strong roots")); bgc_t_join.restart(); } #endif //MULTIPLE_HEAPS disable_preemptive (true); } dprintf (3,("BGC: handle table marking")); GCScan::GcScanHandles(background_promote, max_generation, max_generation, &sc); //concurrent_print_time_delta ("concurrent marking handle table"); concurrent_print_time_delta ("CRH"); current_bgc_state = bgc_mark_stack; dprintf (2,("concurrent draining mark list")); background_drain_mark_list (thread); //concurrent_print_time_delta ("concurrent marking stack roots"); concurrent_print_time_delta ("CRS"); dprintf (2,("concurrent revisiting dirtied pages")); // tuning has shown that there are advantages in doing this 2 times revisit_written_pages (TRUE); revisit_written_pages (TRUE); //concurrent_print_time_delta ("concurrent marking dirtied pages on LOH"); concurrent_print_time_delta ("CRre"); enable_preemptive (); #if defined(MULTIPLE_HEAPS) && !defined(USE_REGIONS) bgc_t_join.join(this, gc_join_concurrent_overflow); if (bgc_t_join.joined()) { uint8_t* all_heaps_max = 0; uint8_t* all_heaps_min = MAX_PTR; int i; for (i = 0; i < n_heaps; i++) { dprintf (3, ("heap %d overflow max is %Ix, min is %Ix", i, g_heaps[i]->background_max_overflow_address, g_heaps[i]->background_min_overflow_address)); if (all_heaps_max < g_heaps[i]->background_max_overflow_address) all_heaps_max = g_heaps[i]->background_max_overflow_address; if (all_heaps_min > g_heaps[i]->background_min_overflow_address) all_heaps_min = g_heaps[i]->background_min_overflow_address; } for (i = 0; i < n_heaps; i++) { g_heaps[i]->background_max_overflow_address = all_heaps_max; g_heaps[i]->background_min_overflow_address = all_heaps_min; } dprintf(3, ("Starting all bgc threads after updating the overflow info")); bgc_t_join.restart(); } #endif //MULTIPLE_HEAPS && !USE_REGIONS disable_preemptive (true); dprintf (2, ("before CRov count: %d", bgc_overflow_count)); bgc_overflow_count = 0; background_process_mark_overflow (TRUE); dprintf (2, ("after CRov count: %d", bgc_overflow_count)); bgc_overflow_count = 0; //concurrent_print_time_delta ("concurrent processing mark overflow"); concurrent_print_time_delta ("CRov"); 
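    // At this point the concurrent portion of the mark phase is done: stacks, handles, the
    // c_mark_list and the pages dirtied during concurrent marking have all been processed.
    // The code below suspends the EE so the remaining marking can run non-concurrently.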
// Stop all threads, crawl all stacks and revisit changed pages. FIRE_EVENT(BGC1stConEnd); dprintf (2, ("Stopping the EE")); enable_preemptive (); #ifdef MULTIPLE_HEAPS bgc_t_join.join(this, gc_join_suspend_ee); if (bgc_t_join.joined()) { bgc_threads_sync_event.Reset(); dprintf(3, ("Joining BGC threads for non concurrent final marking")); bgc_t_join.restart(); } #endif //MULTIPLE_HEAPS if (heap_number == 0) { enter_spin_lock (&gc_lock); suspended_start_time = GetHighPrecisionTimeStamp(); bgc_suspend_EE (); //suspend_EE (); bgc_threads_sync_event.Set(); } else { bgc_threads_sync_event.Wait(INFINITE, FALSE); dprintf (2, ("bgc_threads_sync_event is signalled")); } assert (settings.concurrent); assert (settings.condemned_generation == max_generation); dprintf (2, ("clearing cm_in_progress")); c_write (cm_in_progress, FALSE); bgc_alloc_lock->check(); current_bgc_state = bgc_final_marking; //concurrent_print_time_delta ("concurrent marking ended"); concurrent_print_time_delta ("CR"); FIRE_EVENT(BGC2ndNonConBegin); mark_absorb_new_alloc(); #ifdef FEATURE_EVENT_TRACE static uint64_t current_mark_time = 0; static uint64_t last_mark_time = 0; #endif //FEATURE_EVENT_TRACE // We need a join here 'cause find_object would complain if the gen0 // bricks of another heap haven't been fixed up. So we need to make sure // that every heap's gen0 bricks are fixed up before we proceed. #ifdef MULTIPLE_HEAPS bgc_t_join.join(this, gc_join_after_absorb); if (bgc_t_join.joined()) #endif //MULTIPLE_HEAPS { #ifdef BGC_SERVO_TUNING bgc_tuning::record_bgc_sweep_start(); #endif //BGC_SERVO_TUNING GCToEEInterface::BeforeGcScanRoots(max_generation, /* is_bgc */ true, /* is_concurrent */ false); #ifdef FEATURE_EVENT_TRACE informational_event_enabled_p = EVENT_ENABLED (GCMarkWithType); if (informational_event_enabled_p) last_mark_time = GetHighPrecisionTimeStamp(); #endif //FEATURE_EVENT_TRACE #ifdef MULTIPLE_HEAPS dprintf(3, ("Joining BGC threads after absorb")); bgc_t_join.restart(); #endif //MULTIPLE_HEAPS } //reset the flag, indicating that the EE no longer expect concurrent //marking sc.concurrent = FALSE; total_soh_size = generation_sizes (generation_of (max_generation)); total_loh_size = generation_size (loh_generation); total_poh_size = generation_size (poh_generation); dprintf (GTC_LOG, ("FM: h%d: loh: %Id, soh: %Id, poh: %Id", heap_number, total_loh_size, total_soh_size, total_poh_size)); dprintf (2, ("nonconcurrent marking stack roots")); GCScan::GcScanRoots(background_promote, max_generation, max_generation, &sc); //concurrent_print_time_delta ("nonconcurrent marking stack roots"); concurrent_print_time_delta ("NRS"); finalize_queue->GcScanRoots(background_promote, heap_number, 0); dprintf (2, ("nonconcurrent marking handle table")); GCScan::GcScanHandles(background_promote, max_generation, max_generation, &sc); //concurrent_print_time_delta ("nonconcurrent marking handle table"); concurrent_print_time_delta ("NRH"); dprintf (2,("---- (GC%d)final going through written pages ----", VolatileLoad(&settings.gc_index))); revisit_written_pages (FALSE); //concurrent_print_time_delta ("nonconcurrent revisit dirtied pages on LOH"); concurrent_print_time_delta ("NRre LOH"); dprintf (2, ("before NR 1st Hov count: %d", bgc_overflow_count)); bgc_overflow_count = 0; // Dependent handles need to be scanned with a special algorithm (see the header comment on // scan_dependent_handles for more detail). We perform an initial scan without processing any mark // stack overflow. 
This is not guaranteed to complete the operation but in a common case (where there // are no dependent handles that are due to be collected) it allows us to optimize away further scans. // The call to background_scan_dependent_handles is what will cycle through more iterations if // required and will also perform processing of any mark stack overflow once the dependent handle // table has been fully promoted. dprintf (2, ("1st dependent handle scan and process mark overflow")); GCScan::GcDhInitialScan(background_promote, max_generation, max_generation, &sc); background_scan_dependent_handles (&sc); //concurrent_print_time_delta ("1st nonconcurrent dependent handle scan and process mark overflow"); concurrent_print_time_delta ("NR 1st Hov"); dprintf (2, ("after NR 1st Hov count: %d", bgc_overflow_count)); bgc_overflow_count = 0; #ifdef MULTIPLE_HEAPS bgc_t_join.join(this, gc_join_null_dead_short_weak); if (bgc_t_join.joined()) #endif //MULTIPLE_HEAPS { #ifdef FEATURE_EVENT_TRACE bgc_time_info[time_mark_sizedref] = 0; record_mark_time (bgc_time_info[time_mark_roots], current_mark_time, last_mark_time); #endif //FEATURE_EVENT_TRACE #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP // The runtime is suspended, take this opportunity to pause tracking written pages to // avoid further perf penalty after the runtime is restarted SoftwareWriteWatch::DisableForGCHeap(); #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP GCToEEInterface::AfterGcScanRoots (max_generation, max_generation, &sc); #ifdef MULTIPLE_HEAPS dprintf(3, ("Joining BGC threads for short weak handle scan")); bgc_t_join.restart(); #endif //MULTIPLE_HEAPS } // null out the target of short weakref that were not promoted. GCScan::GcShortWeakPtrScan(max_generation, max_generation, &sc); //concurrent_print_time_delta ("bgc GcShortWeakPtrScan"); concurrent_print_time_delta ("NR GcShortWeakPtrScan"); { #ifdef MULTIPLE_HEAPS bgc_t_join.join(this, gc_join_scan_finalization); if (bgc_t_join.joined()) { #endif //MULTIPLE_HEAPS #ifdef FEATURE_EVENT_TRACE record_mark_time (bgc_time_info[time_mark_short_weak], current_mark_time, last_mark_time); #endif //FEATURE_EVENT_TRACE #ifdef MULTIPLE_HEAPS dprintf(3, ("Joining BGC threads for finalization")); bgc_t_join.restart(); } #endif //MULTIPLE_HEAPS dprintf(3,("Marking finalization data")); //concurrent_print_time_delta ("bgc joined to mark finalization"); concurrent_print_time_delta ("NRj"); finalize_queue->ScanForFinalization (background_promote, max_generation, FALSE, __this); concurrent_print_time_delta ("NRF"); } dprintf (2, ("before NR 2nd Hov count: %d", bgc_overflow_count)); bgc_overflow_count = 0; // Scan dependent handles again to promote any secondaries associated with primaries that were promoted // for finalization. As before background_scan_dependent_handles will also process any mark stack // overflow. 
dprintf (2, ("2nd dependent handle scan and process mark overflow")); background_scan_dependent_handles (&sc); //concurrent_print_time_delta ("2nd nonconcurrent dependent handle scan and process mark overflow"); concurrent_print_time_delta ("NR 2nd Hov"); #ifdef MULTIPLE_HEAPS bgc_t_join.join(this, gc_join_null_dead_long_weak); if (bgc_t_join.joined()) #endif //MULTIPLE_HEAPS { #ifdef FEATURE_EVENT_TRACE record_mark_time (bgc_time_info[time_mark_scan_finalization], current_mark_time, last_mark_time); #endif //FEATURE_EVENT_TRACE #ifdef MULTIPLE_HEAPS dprintf(2, ("Joining BGC threads for weak pointer deletion")); bgc_t_join.restart(); #endif //MULTIPLE_HEAPS } // null out the target of long weakref that were not promoted. GCScan::GcWeakPtrScan (max_generation, max_generation, &sc); concurrent_print_time_delta ("NR GcWeakPtrScan"); #ifdef MULTIPLE_HEAPS bgc_t_join.join(this, gc_join_null_dead_syncblk); if (bgc_t_join.joined()) #endif //MULTIPLE_HEAPS { dprintf (2, ("calling GcWeakPtrScanBySingleThread")); // scan for deleted entries in the syncblk cache GCScan::GcWeakPtrScanBySingleThread (max_generation, max_generation, &sc); #ifdef FEATURE_EVENT_TRACE record_mark_time (bgc_time_info[time_mark_long_weak], current_mark_time, last_mark_time); #endif //FEATURE_EVENT_TRACE concurrent_print_time_delta ("NR GcWeakPtrScanBySingleThread"); #ifdef MULTIPLE_HEAPS dprintf(2, ("Starting BGC threads for end of background mark phase")); bgc_t_join.restart(); #endif //MULTIPLE_HEAPS } dprintf (2, ("end of bgc mark: loh: %d, poh: %d, soh: %d", generation_size (loh_generation), generation_size (poh_generation), generation_sizes (generation_of (max_generation)))); for (int gen_idx = max_generation; gen_idx < total_generation_count; gen_idx++) { generation* gen = generation_of (gen_idx); dynamic_data* dd = dynamic_data_of (gen_idx); dd_begin_data_size (dd) = generation_size (gen_idx) - (generation_free_list_space (gen) + generation_free_obj_space (gen)) - get_generation_start_size (gen_idx); dd_survived_size (dd) = 0; dd_pinned_survived_size (dd) = 0; dd_artificial_pinned_survived_size (dd) = 0; dd_added_pinned_size (dd) = 0; } for (int i = get_start_generation_index(); i < uoh_start_generation; i++) { heap_segment* seg = heap_segment_rw (generation_start_segment (generation_of (i))); PREFIX_ASSUME(seg != NULL); while (seg) { seg->flags &= ~heap_segment_flags_swept; #ifndef USE_REGIONS if (heap_segment_allocated (seg) == heap_segment_mem (seg)) { FATAL_GC_ERROR(); } if (seg == ephemeral_heap_segment) { heap_segment_background_allocated (seg) = generation_allocation_start (generation_of (max_generation - 1)); } else #endif //!USE_REGIONS { heap_segment_background_allocated (seg) = heap_segment_allocated (seg); } background_soh_size_end_mark += heap_segment_background_allocated (seg) - heap_segment_mem (seg); dprintf (3333, ("h%d gen%d seg %Ix (%Ix) background allocated is %Ix", heap_number, i, (size_t)(seg), heap_segment_mem (seg), heap_segment_background_allocated (seg))); seg = heap_segment_next_rw (seg); } } // We need to void alloc contexts here 'cause while background_ephemeral_sweep is running // we can't let the user code consume the left over parts in these alloc contexts. 
repair_allocation_contexts (FALSE); dprintf (2, ("end of bgc mark: gen2 free list space: %d, free obj space: %d", generation_free_list_space (generation_of (max_generation)), generation_free_obj_space (generation_of (max_generation)))); dprintf(2,("---- (GC%d)End of background mark phase ----", VolatileLoad(&settings.gc_index))); } void gc_heap::suspend_EE () { dprintf (2, ("suspend_EE")); #ifdef MULTIPLE_HEAPS gc_heap* hp = gc_heap::g_heaps[0]; GCToEEInterface::SuspendEE(SUSPEND_FOR_GC_PREP); #else GCToEEInterface::SuspendEE(SUSPEND_FOR_GC_PREP); #endif //MULTIPLE_HEAPS } #ifdef MULTIPLE_HEAPS void gc_heap::bgc_suspend_EE () { for (int i = 0; i < n_heaps; i++) { gc_heap::g_heaps[i]->reset_gc_done(); } gc_started = TRUE; dprintf (2, ("bgc_suspend_EE")); GCToEEInterface::SuspendEE(SUSPEND_FOR_GC_PREP); gc_started = FALSE; for (int i = 0; i < n_heaps; i++) { gc_heap::g_heaps[i]->set_gc_done(); } } #else void gc_heap::bgc_suspend_EE () { reset_gc_done(); gc_started = TRUE; dprintf (2, ("bgc_suspend_EE")); GCToEEInterface::SuspendEE(SUSPEND_FOR_GC_PREP); gc_started = FALSE; set_gc_done(); } #endif //MULTIPLE_HEAPS void gc_heap::restart_EE () { dprintf (2, ("restart_EE")); #ifdef MULTIPLE_HEAPS GCToEEInterface::RestartEE(FALSE); #else GCToEEInterface::RestartEE(FALSE); #endif //MULTIPLE_HEAPS } inline uint8_t* gc_heap::high_page (heap_segment* seg, BOOL concurrent_p) { #ifdef USE_REGIONS assert (!concurrent_p || (heap_segment_gen_num (seg) >= max_generation)); #else if (concurrent_p) { uint8_t* end = ((seg == ephemeral_heap_segment) ? generation_allocation_start (generation_of (max_generation - 1)) : heap_segment_allocated (seg)); return align_lower_page (end); } else #endif //USE_REGIONS { return heap_segment_allocated (seg); } } void gc_heap::revisit_written_page (uint8_t* page, uint8_t* end, BOOL concurrent_p, uint8_t*& last_page, uint8_t*& last_object, BOOL large_objects_p, size_t& num_marked_objects) { uint8_t* start_address = page; uint8_t* o = 0; int align_const = get_alignment_constant (!large_objects_p); uint8_t* high_address = end; uint8_t* current_lowest_address = background_saved_lowest_address; uint8_t* current_highest_address = background_saved_highest_address; BOOL no_more_loop_p = FALSE; THREAD_FROM_HEAP; #ifndef MULTIPLE_HEAPS const int thread = heap_number; #endif //!MULTIPLE_HEAPS if (large_objects_p) { o = last_object; } else { if (((last_page + WRITE_WATCH_UNIT_SIZE) == page) || (start_address <= last_object)) { o = last_object; } else { o = find_first_object (start_address, last_object); // We can visit the same object again, but on a different page. assert (o >= last_object); } } dprintf (3,("page %Ix start: %Ix, %Ix[ ", (size_t)page, (size_t)o, (size_t)(min (high_address, page + WRITE_WATCH_UNIT_SIZE)))); while (o < (min (high_address, page + WRITE_WATCH_UNIT_SIZE))) { size_t s; if (concurrent_p && large_objects_p) { bgc_alloc_lock->bgc_mark_set (o); if (((CObjectHeader*)o)->IsFree()) { s = unused_array_size (o); } else { s = size (o); } } else { s = size (o); } dprintf (3,("Considering object %Ix(%s)", (size_t)o, (background_object_marked (o, FALSE) ? "bm" : "nbm"))); assert (Align (s) >= Align (min_obj_size)); uint8_t* next_o = o + Align (s, align_const); if (next_o >= start_address) { #ifdef MULTIPLE_HEAPS if (concurrent_p) { // We set last_object here for SVR BGC here because SVR BGC has more than // one GC thread. 
When we have more than one GC thread we would run into this // situation if we skipped unmarked objects: // bgc thread 1 calls GWW, and detect object X not marked so it would skip it // for revisit. // bgc thread 2 marks X and all its current children. // user thread comes along and dirties more (and later) pages in X. // bgc thread 1 calls GWW again and gets those later pages but it will not mark anything // on them because it had already skipped X. We need to detect that this object is now // marked and mark the children on the dirtied pages. // In the future if we have less BGC threads than we have heaps we should add // the check to the number of BGC threads. last_object = o; } #endif //MULTIPLE_HEAPS if (contain_pointers (o) && (!((o >= current_lowest_address) && (o < current_highest_address)) || background_marked (o))) { dprintf (3, ("going through %Ix", (size_t)o)); go_through_object (method_table(o), o, s, poo, start_address, use_start, (o + s), if ((uint8_t*)poo >= min (high_address, page + WRITE_WATCH_UNIT_SIZE)) { no_more_loop_p = TRUE; goto end_limit; } uint8_t* oo = *poo; num_marked_objects++; background_mark_object (oo THREAD_NUMBER_ARG); ); } else if ( concurrent_p && ((CObjectHeader*)o)->IsFree() && (next_o > min (high_address, page + WRITE_WATCH_UNIT_SIZE))) { // We need to not skip the object here because of this corner scenario: // A large object was being allocated during BGC mark so we first made it // into a free object, then cleared its memory. In this loop we would detect // that it's a free object which normally we would skip. But by the next time // we call GetWriteWatch we could still be on this object and the object had // been made into a valid object and some of its memory was changed. We need // to be sure to process those written pages so we can't skip the object just // yet. // // Similarly, when using software write watch, don't advance last_object when // the current object is a free object that spans beyond the current page or // high_address. Software write watch acquires gc_lock before the concurrent // GetWriteWatch() call during revisit_written_pages(). A foreground GC may // happen at that point and allocate from this free region, so when // revisit_written_pages() continues, it cannot skip now-valid objects in this // region. no_more_loop_p = TRUE; goto end_limit; } } end_limit: if (concurrent_p && large_objects_p) { bgc_alloc_lock->bgc_mark_done (); } if (no_more_loop_p) { break; } o = next_o; } #ifdef MULTIPLE_HEAPS if (concurrent_p) { assert (last_object < (min (high_address, page + WRITE_WATCH_UNIT_SIZE))); } else #endif //MULTIPLE_HEAPS { last_object = o; } dprintf (3,("Last object: %Ix", (size_t)last_object)); last_page = align_write_watch_lower_page (o); if (concurrent_p) { allow_fgc(); } } // When reset_only_p is TRUE, we should only reset pages that are in range // because we need to consider the segments or part of segments that were // allocated out of range all live. void gc_heap::revisit_written_pages (BOOL concurrent_p, BOOL reset_only_p) { if (concurrent_p && !reset_only_p) { current_bgc_state = bgc_revisit_soh; } size_t total_dirtied_pages = 0; size_t total_marked_objects = 0; bool reset_watch_state = !!concurrent_p; bool is_runtime_suspended = !concurrent_p; BOOL small_object_segments = TRUE; int start_gen_idx = get_start_generation_index(); #ifdef USE_REGIONS if (concurrent_p && !reset_only_p) { // We don't go into ephemeral regions during concurrent revisit. 
start_gen_idx = max_generation; } #endif //USE_REGIONS for (int i = start_gen_idx; i < total_generation_count; i++) { heap_segment* seg = heap_segment_rw (generation_start_segment (generation_of (i))); PREFIX_ASSUME(seg != NULL); while (seg) { uint8_t* base_address = (uint8_t*)heap_segment_mem (seg); //we need to truncate to the base of the page because //some newly allocated could exist beyond heap_segment_allocated //and if we reset the last page write watch status, // they wouldn't be guaranteed to be visited -> gc hole. uintptr_t bcount = array_size; uint8_t* last_page = 0; uint8_t* last_object = heap_segment_mem (seg); uint8_t* high_address = 0; BOOL skip_seg_p = FALSE; if (reset_only_p) { if ((heap_segment_mem (seg) >= background_saved_lowest_address) || (heap_segment_reserved (seg) <= background_saved_highest_address)) { dprintf (3, ("h%d: sseg: %Ix(-%Ix)", heap_number, heap_segment_mem (seg), heap_segment_reserved (seg))); skip_seg_p = TRUE; } } if (!skip_seg_p) { dprintf (3, ("looking at seg %Ix", (size_t)last_object)); if (reset_only_p) { base_address = max (base_address, background_saved_lowest_address); dprintf (3, ("h%d: reset only starting %Ix", heap_number, base_address)); } dprintf (3, ("h%d: starting: %Ix, seg %Ix-%Ix", heap_number, base_address, heap_segment_mem (seg), heap_segment_reserved (seg))); while (1) { if (reset_only_p) { high_address = ((seg == ephemeral_heap_segment) ? alloc_allocated : heap_segment_allocated (seg)); high_address = min (high_address, background_saved_highest_address); } else { high_address = high_page (seg, concurrent_p); } if ((base_address < high_address) && (bcount >= array_size)) { ptrdiff_t region_size = high_address - base_address; dprintf (3, ("h%d: gw: [%Ix(%Id)", heap_number, (size_t)base_address, (size_t)region_size)); #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP // When the runtime is not suspended, it's possible for the table to be resized concurrently with the scan // for dirty pages below. Prevent that by synchronizing with grow_brick_card_tables(). When the runtime is // suspended, it's ok to scan for dirty pages concurrently from multiple background GC threads for disjoint // memory regions. 
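                        // Condensed restatement of the pattern implemented just
                        // below (sketch only; the #ifdef plumbing is elided):
                        //
                        //     if (!is_runtime_suspended) enter_spin_lock (&gc_lock);  // block a concurrent table resize
                        //     get_write_watch_for_gc_heap (...);                      // snapshot (and optionally reset) dirty pages
                        //     if (!is_runtime_suspended) leave_spin_lock (&gc_lock);
                        //
                        // i.e. the lock is only taken while the EE is running, which
                        // is the only time the write watch table can be resized
                        // underneath us.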
if (!is_runtime_suspended) { enter_spin_lock(&gc_lock); } #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP get_write_watch_for_gc_heap (reset_watch_state, base_address, region_size, (void**)background_written_addresses, &bcount, is_runtime_suspended); #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP if (!is_runtime_suspended) { leave_spin_lock(&gc_lock); } #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP if (bcount != 0) { total_dirtied_pages += bcount; dprintf (3, ("Found %d pages [%Ix, %Ix[", bcount, (size_t)base_address, (size_t)high_address)); } if (!reset_only_p) { for (unsigned i = 0; i < bcount; i++) { uint8_t* page = (uint8_t*)background_written_addresses[i]; dprintf (3, ("looking at page %d at %Ix(h: %Ix)", i, (size_t)page, (size_t)high_address)); if (page < high_address) { //search for marked objects in the page revisit_written_page (page, high_address, concurrent_p, last_page, last_object, !small_object_segments, total_marked_objects); } else { dprintf (3, ("page %d at %Ix is >= %Ix!", i, (size_t)page, (size_t)high_address)); assert (!"page shouldn't have exceeded limit"); } } } if (bcount >= array_size){ base_address = background_written_addresses [array_size-1] + WRITE_WATCH_UNIT_SIZE; bcount = array_size; } } else { break; } } } seg = heap_segment_next_rw (seg); } if (i == soh_gen2) { if (!reset_only_p) { dprintf (GTC_LOG, ("h%d: SOH: dp:%Id; mo: %Id", heap_number, total_dirtied_pages, total_marked_objects)); fire_revisit_event (total_dirtied_pages, total_marked_objects, FALSE); concurrent_print_time_delta (concurrent_p ? "CR SOH" : "NR SOH"); total_dirtied_pages = 0; total_marked_objects = 0; } if (concurrent_p && !reset_only_p) { current_bgc_state = bgc_revisit_uoh; } small_object_segments = FALSE; dprintf (3, ("now revisiting large object segments")); } else { if (reset_only_p) { dprintf (GTC_LOG, ("h%d: tdp: %Id", heap_number, total_dirtied_pages)); } else { dprintf (GTC_LOG, ("h%d: LOH: dp:%Id; mo: %Id", heap_number, total_dirtied_pages, total_marked_objects)); fire_revisit_event (total_dirtied_pages, total_marked_objects, TRUE); } } } } void gc_heap::background_grow_c_mark_list() { assert (c_mark_list_index >= c_mark_list_length); BOOL should_drain_p = FALSE; THREAD_FROM_HEAP; #ifndef MULTIPLE_HEAPS const int thread = heap_number; #endif //!MULTIPLE_HEAPS dprintf (2, ("stack copy buffer overflow")); uint8_t** new_c_mark_list = 0; { FAULT_NOT_FATAL(); if (c_mark_list_length >= (SIZE_T_MAX / (2 * sizeof (uint8_t*)))) { should_drain_p = TRUE; } else { new_c_mark_list = new (nothrow) uint8_t*[c_mark_list_length*2]; if (new_c_mark_list == 0) { should_drain_p = TRUE; } } } if (should_drain_p) { dprintf (2, ("No more memory for the stacks copy, draining..")); //drain the list by marking its elements background_drain_mark_list (thread); } else { assert (new_c_mark_list); memcpy (new_c_mark_list, c_mark_list, c_mark_list_length*sizeof(uint8_t*)); c_mark_list_length = c_mark_list_length*2; delete c_mark_list; c_mark_list = new_c_mark_list; } } void gc_heap::background_promote_callback (Object** ppObject, ScanContext* sc, uint32_t flags) { UNREFERENCED_PARAMETER(sc); //in order to save space on the array, mark the object, //knowing that it will be visited later assert (settings.concurrent); THREAD_NUMBER_FROM_CONTEXT; #ifndef MULTIPLE_HEAPS const int thread = 0; #endif //!MULTIPLE_HEAPS uint8_t* o = (uint8_t*)*ppObject; if (o == 0) return; HEAP_FROM_THREAD; gc_heap* hp = gc_heap::heap_of (o); if ((o < hp->background_saved_lowest_address) || (o >= 
hp->background_saved_highest_address)) { return; } if (flags & GC_CALL_INTERIOR) { o = hp->find_object (o); if (o == 0) return; } #ifdef FEATURE_CONSERVATIVE_GC // For conservative GC, a value on stack may point to middle of a free object. // In this case, we don't need to promote the pointer. if (GCConfig::GetConservativeGC() && ((CObjectHeader*)o)->IsFree()) { return; } #endif //FEATURE_CONSERVATIVE_GC #ifdef _DEBUG ((CObjectHeader*)o)->Validate(); #endif //_DEBUG dprintf (3, ("Concurrent Background Promote %Ix", (size_t)o)); if (o && (size (o) > loh_size_threshold)) { dprintf (3, ("Brc %Ix", (size_t)o)); } if (hpt->c_mark_list_index >= hpt->c_mark_list_length) { hpt->background_grow_c_mark_list(); } dprintf (3, ("pushing %08x into mark_list", (size_t)o)); hpt->c_mark_list [hpt->c_mark_list_index++] = o; STRESS_LOG3(LF_GC|LF_GCROOTS, LL_INFO1000000, " GCHeap::Background Promote: Promote GC Root *%p = %p MT = %pT", ppObject, o, o ? ((Object*) o)->GetGCSafeMethodTable() : NULL); } void gc_heap::mark_absorb_new_alloc() { fix_allocation_contexts (FALSE); gen0_bricks_cleared = FALSE; clear_gen0_bricks(); } BOOL gc_heap::prepare_bgc_thread(gc_heap* gh) { BOOL success = FALSE; BOOL thread_created = FALSE; dprintf (2, ("Preparing gc thread")); gh->bgc_threads_timeout_cs.Enter(); if (!(gh->bgc_thread_running)) { dprintf (2, ("GC thread not running")); if ((gh->bgc_thread == 0) && create_bgc_thread(gh)) { success = TRUE; thread_created = TRUE; } } else { dprintf (3, ("GC thread already running")); success = TRUE; } gh->bgc_threads_timeout_cs.Leave(); if(thread_created) FIRE_EVENT(GCCreateConcurrentThread_V1); return success; } BOOL gc_heap::create_bgc_thread(gc_heap* gh) { assert (background_gc_done_event.IsValid()); //dprintf (2, ("Creating BGC thread")); gh->bgc_thread_running = GCToEEInterface::CreateThread(gh->bgc_thread_stub, gh, true, ".NET BGC"); return gh->bgc_thread_running; } BOOL gc_heap::create_bgc_threads_support (int number_of_heaps) { BOOL ret = FALSE; dprintf (3, ("Creating concurrent GC thread for the first time")); if (!background_gc_done_event.CreateManualEventNoThrow(TRUE)) { goto cleanup; } if (!bgc_threads_sync_event.CreateManualEventNoThrow(FALSE)) { goto cleanup; } if (!ee_proceed_event.CreateAutoEventNoThrow(FALSE)) { goto cleanup; } if (!bgc_start_event.CreateManualEventNoThrow(FALSE)) { goto cleanup; } #ifdef MULTIPLE_HEAPS bgc_t_join.init (number_of_heaps, join_flavor_bgc); #else UNREFERENCED_PARAMETER(number_of_heaps); #endif //MULTIPLE_HEAPS ret = TRUE; cleanup: if (!ret) { if (background_gc_done_event.IsValid()) { background_gc_done_event.CloseEvent(); } if (bgc_threads_sync_event.IsValid()) { bgc_threads_sync_event.CloseEvent(); } if (ee_proceed_event.IsValid()) { ee_proceed_event.CloseEvent(); } if (bgc_start_event.IsValid()) { bgc_start_event.CloseEvent(); } } return ret; } BOOL gc_heap::create_bgc_thread_support() { uint8_t** parr; //needs to have room for enough smallest objects fitting on a page parr = new (nothrow) uint8_t*[1 + OS_PAGE_SIZE / MIN_OBJECT_SIZE]; if (!parr) { return FALSE; } make_c_mark_list (parr); return TRUE; } int gc_heap::check_for_ephemeral_alloc() { int gen = ((settings.reason == reason_oos_soh) ? 
(max_generation - 1) : -1); if (gen == -1) { #ifdef MULTIPLE_HEAPS for (int heap_index = 0; heap_index < n_heaps; heap_index++) #endif //MULTIPLE_HEAPS { for (int i = 0; i < max_generation; i++) { #ifdef MULTIPLE_HEAPS if (g_heaps[heap_index]->get_new_allocation (i) <= 0) #else if (get_new_allocation (i) <= 0) #endif //MULTIPLE_HEAPS { gen = max (gen, i); } else break; } } } return gen; } // Wait for gc to finish sequential part void gc_heap::wait_to_proceed() { assert (background_gc_done_event.IsValid()); assert (bgc_start_event.IsValid()); user_thread_wait(&ee_proceed_event, FALSE); } // Start a new concurrent gc void gc_heap::start_c_gc() { assert (background_gc_done_event.IsValid()); assert (bgc_start_event.IsValid()); //Need to make sure that the gc thread is in the right place. background_gc_done_event.Wait(INFINITE, FALSE); background_gc_done_event.Reset(); bgc_start_event.Set(); } void gc_heap::do_background_gc() { dprintf (2, ("starting a BGC")); #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { g_heaps[i]->init_background_gc(); } #else init_background_gc(); #endif //MULTIPLE_HEAPS #ifdef BGC_SERVO_TUNING bgc_tuning::record_bgc_start(); #endif //BGC_SERVO_TUNING //start the background gc start_c_gc (); //wait until we get restarted by the BGC. wait_to_proceed(); } void gc_heap::kill_gc_thread() { //assert (settings.concurrent == FALSE); // We are doing a two-stage shutdown now. // In the first stage, we do minimum work, and call ExitProcess at the end. // In the secodn stage, we have the Loader lock and only one thread is // alive. Hence we do not need to kill gc thread. background_gc_done_event.CloseEvent(); bgc_start_event.CloseEvent(); bgc_threads_timeout_cs.Destroy(); bgc_thread = 0; } void gc_heap::bgc_thread_function() { assert (background_gc_done_event.IsValid()); assert (bgc_start_event.IsValid()); dprintf (3, ("gc_thread thread starting...")); BOOL do_exit = FALSE; bool cooperative_mode = true; bgc_thread_id.SetToCurrentThread(); dprintf (1, ("bgc_thread_id is set to %x", (uint32_t)GCToOSInterface::GetCurrentThreadIdForLogging())); while (1) { // Wait for work to do... dprintf (3, ("bgc thread: waiting...")); cooperative_mode = enable_preemptive (); //current_thread->m_fPreemptiveGCDisabled = 0; uint32_t result = bgc_start_event.Wait( #ifdef _DEBUG #ifdef MULTIPLE_HEAPS INFINITE, #else 2000, #endif //MULTIPLE_HEAPS #else //_DEBUG #ifdef MULTIPLE_HEAPS INFINITE, #else 20000, #endif //MULTIPLE_HEAPS #endif //_DEBUG FALSE); dprintf (2, ("gc thread: finished waiting")); // not calling disable_preemptive here 'cause we // can't wait for GC complete here - RestartEE will be called // when we've done the init work. if (result == WAIT_TIMEOUT) { // Should join the bgc threads and terminate all of them // at once. 
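            // Rough sketch of the timeout handling below (dprintf noise and the
            // bgc_thread_id bookkeeping are elided):
            //
            //     bgc_threads_timeout_cs.Enter();
            //     if (!keep_bgc_threads_p)         // nobody needs us to stick around
            //     {
            //         bgc_thread_running = FALSE;  // lets prepare_bgc_thread() recreate us later
            //         bgc_thread = 0;
            //         do_exit = TRUE;
            //     }
            //     bgc_threads_timeout_cs.Leave();
            //
            // This pairs with prepare_bgc_thread(), which enters the same
            // critical section before checking bgc_thread_running and deciding
            // whether a new BGC thread has to be created.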
dprintf (1, ("GC thread timeout")); bgc_threads_timeout_cs.Enter(); if (!keep_bgc_threads_p) { dprintf (2, ("GC thread exiting")); bgc_thread_running = FALSE; bgc_thread = 0; bgc_thread_id.Clear(); do_exit = TRUE; } bgc_threads_timeout_cs.Leave(); if (do_exit) break; else { dprintf (3, ("GC thread needed, not exiting")); continue; } } // if we signal the thread with no concurrent work to do -> exit if (!settings.concurrent) { dprintf (3, ("no concurrent GC needed, exiting")); break; } gc_background_running = TRUE; dprintf (2, (ThreadStressLog::gcStartBgcThread(), heap_number, generation_free_list_space (generation_of (max_generation)), generation_free_obj_space (generation_of (max_generation)), dd_fragmentation (dynamic_data_of (max_generation)))); gc1(); #ifndef DOUBLY_LINKED_FL current_bgc_state = bgc_not_in_process; #endif //!DOUBLY_LINKED_FL enable_preemptive (); #ifdef MULTIPLE_HEAPS bgc_t_join.join(this, gc_join_done); if (bgc_t_join.joined()) #endif //MULTIPLE_HEAPS { enter_spin_lock (&gc_lock); dprintf (SPINLOCK_LOG, ("bgc Egc")); bgc_start_event.Reset(); do_post_gc(); #ifdef MULTIPLE_HEAPS for (int gen = max_generation; gen < total_generation_count; gen++) { size_t desired_per_heap = 0; size_t total_desired = 0; gc_heap* hp = 0; dynamic_data* dd; for (int i = 0; i < n_heaps; i++) { hp = g_heaps[i]; dd = hp->dynamic_data_of (gen); size_t temp_total_desired = total_desired + dd_desired_allocation (dd); if (temp_total_desired < total_desired) { // we overflowed. total_desired = (size_t)MAX_PTR; break; } total_desired = temp_total_desired; } desired_per_heap = Align ((total_desired/n_heaps), get_alignment_constant (FALSE)); if (gen >= loh_generation) { desired_per_heap = exponential_smoothing (gen, dd_collection_count (dynamic_data_of (max_generation)), desired_per_heap); } for (int i = 0; i < n_heaps; i++) { hp = gc_heap::g_heaps[i]; dd = hp->dynamic_data_of (gen); dd_desired_allocation (dd) = desired_per_heap; dd_gc_new_allocation (dd) = desired_per_heap; dd_new_allocation (dd) = desired_per_heap; } } #endif //MULTIPLE_HEAPS #ifdef MULTIPLE_HEAPS fire_pevents(); #endif //MULTIPLE_HEAPS c_write (settings.concurrent, FALSE); gc_background_running = FALSE; keep_bgc_threads_p = FALSE; background_gc_done_event.Set(); dprintf (SPINLOCK_LOG, ("bgc Lgc")); leave_spin_lock (&gc_lock); #ifdef MULTIPLE_HEAPS dprintf(1, ("End of BGC - starting all BGC threads")); bgc_t_join.restart(); #endif //MULTIPLE_HEAPS } // We can't disable preempt here because there might've been a GC already // started and decided to do a BGC and waiting for a BGC thread to restart // vm. That GC will be waiting in wait_to_proceed and we are waiting for it // to restart the VM so we deadlock. //gc_heap::disable_preemptive (true); } FIRE_EVENT(GCTerminateConcurrentThread_V1); dprintf (3, ("bgc_thread thread exiting")); return; } #ifdef BGC_SERVO_TUNING bool gc_heap::bgc_tuning::stepping_trigger (uint32_t current_memory_load, size_t current_gen2_count) { if (!bgc_tuning::enable_fl_tuning) { return false; } bool stepping_trigger_p = false; if (use_stepping_trigger_p) { dprintf (BGC_TUNING_LOG, ("current ml: %d, goal: %d", current_memory_load, memory_load_goal)); // We don't go all the way up to mem goal because if we do we could end up with every // BGC being triggered by stepping all the way up to goal, and when we actually reach // goal we have no time to react 'cause the next BGC could already be over goal. 
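        // Worked example with hypothetical numbers (say memory_load_goal = 75
        // and stepping_interval = 5): the stepping path below stays active while
        // the load is <= 50 (= goal * 2 / 3) or still more than 15
        // (= 3 * stepping_interval) points under the goal, i.e. below 60 here;
        // inside that range a BGC is triggered whenever the load has climbed by
        // at least stepping_interval since the last stepping trigger and no gen2
        // GC happened in between. Once the load gets within 15 points of the
        // goal, the else branch switches use_stepping_trigger_p off.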
if ((current_memory_load <= (memory_load_goal * 2 / 3)) || ((memory_load_goal > current_memory_load) && ((memory_load_goal - current_memory_load) > (stepping_interval * 3)))) { int memory_load_delta = (int)current_memory_load - (int)last_stepping_mem_load; if (memory_load_delta >= (int)stepping_interval) { stepping_trigger_p = (current_gen2_count == last_stepping_bgc_count); if (stepping_trigger_p) { current_gen2_count++; } dprintf (BGC_TUNING_LOG, ("current ml: %d - %d = %d (>= %d), gen2 count: %d->%d, stepping trigger: %s ", current_memory_load, last_stepping_mem_load, memory_load_delta, stepping_interval, last_stepping_bgc_count, current_gen2_count, (stepping_trigger_p ? "yes" : "no"))); last_stepping_mem_load = current_memory_load; last_stepping_bgc_count = current_gen2_count; } } else { use_stepping_trigger_p = false; } } return stepping_trigger_p; } // Note that I am doing this per heap but as we are in this calculation other // heaps could increase their fl alloc. We are okay with that inaccurancy. bool gc_heap::bgc_tuning::should_trigger_bgc_loh() { if (fl_tuning_triggered) { #ifdef MULTIPLE_HEAPS gc_heap* hp = g_heaps[0]; #else gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS if (!(gc_heap::background_running_p())) { size_t current_alloc = get_total_servo_alloc (loh_generation); tuning_calculation* current_gen_calc = &gen_calc[loh_generation - max_generation]; if (current_alloc < current_gen_calc->last_bgc_end_alloc) { dprintf (BGC_TUNING_LOG, ("BTL: current alloc: %Id, last alloc: %Id?", current_alloc, current_gen_calc->last_bgc_end_alloc)); } bool trigger_p = ((current_alloc - current_gen_calc->last_bgc_end_alloc) >= current_gen_calc->alloc_to_trigger); dprintf (2, ("BTL3: LOH a %Id, la: %Id(%Id), %Id", current_alloc, current_gen_calc->last_bgc_end_alloc, (current_alloc - current_gen_calc->last_bgc_end_alloc), current_gen_calc->alloc_to_trigger)); if (trigger_p) { dprintf (BGC_TUNING_LOG, ("BTL3: LOH detected (%Id - %Id) >= %Id, TRIGGER", current_alloc, current_gen_calc->last_bgc_end_alloc, current_gen_calc->alloc_to_trigger)); return true; } } } return false; } bool gc_heap::bgc_tuning::should_trigger_bgc() { if (!bgc_tuning::enable_fl_tuning || gc_heap::background_running_p()) { return false; } if (settings.reason == reason_bgc_tuning_loh) { // TODO: this should be an assert because if the reason was reason_bgc_tuning_loh, // we should have already set to condemn max_generation but I'm keeping it // for now in case we are reverting it for other reasons. bgc_tuning::next_bgc_p = true; dprintf (BGC_TUNING_LOG, ("BTL LOH triggered")); return true; } if (!bgc_tuning::next_bgc_p && !fl_tuning_triggered && (gc_heap::settings.entry_memory_load >= (memory_load_goal * 2 / 3)) && (gc_heap::full_gc_counts[gc_type_background] >= 2)) { next_bgc_p = true; gen_calc[0].first_alloc_to_trigger = gc_heap::get_total_servo_alloc (max_generation); gen_calc[1].first_alloc_to_trigger = gc_heap::get_total_servo_alloc (loh_generation); dprintf (BGC_TUNING_LOG, ("BTL[GTC] mem high enough: %d(goal: %d), %Id BGCs done, g2a=%Id, g3a=%Id, trigger FL tuning!", gc_heap::settings.entry_memory_load, memory_load_goal, gc_heap::full_gc_counts[gc_type_background], gen_calc[0].first_alloc_to_trigger, gen_calc[1].first_alloc_to_trigger)); } if (bgc_tuning::next_bgc_p) { dprintf (BGC_TUNING_LOG, ("BTL started FL tuning")); return true; } if (!fl_tuning_triggered) { return false; } // If the tuning started, we need to check if we've exceeded the alloc. 
int index = 0; bgc_tuning::tuning_calculation* current_gen_calc = 0; index = 0; current_gen_calc = &bgc_tuning::gen_calc[index]; #ifdef MULTIPLE_HEAPS gc_heap* hp = g_heaps[0]; #else gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS size_t current_gen1_index = dd_collection_count (hp->dynamic_data_of (max_generation - 1)); size_t gen1_so_far = current_gen1_index - gen1_index_last_bgc_end; if (current_gen_calc->alloc_to_trigger > 0) { // We are specifically checking for gen2 here. LOH is covered by should_trigger_bgc_loh. size_t current_alloc = get_total_servo_alloc (max_generation); if ((current_alloc - current_gen_calc->last_bgc_end_alloc) >= current_gen_calc->alloc_to_trigger) { dprintf (BGC_TUNING_LOG, ("BTL2: SOH detected (%Id - %Id) >= %Id, TRIGGER", current_alloc, current_gen_calc->last_bgc_end_alloc, current_gen_calc->alloc_to_trigger)); settings.reason = reason_bgc_tuning_soh; return true; } } return false; } bool gc_heap::bgc_tuning::should_delay_alloc (int gen_number) { if ((gen_number != max_generation) || !bgc_tuning::enable_fl_tuning) return false; if (current_c_gc_state == c_gc_state_planning) { int i = 0; #ifdef MULTIPLE_HEAPS for (; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; size_t current_fl_size = generation_free_list_space (hp->generation_of (max_generation)); size_t last_bgc_fl_size = hp->bgc_maxgen_end_fl_size; #else { size_t current_fl_size = generation_free_list_space (generation_of (max_generation)); size_t last_bgc_fl_size = bgc_maxgen_end_fl_size; #endif //MULTIPLE_HEAPS if (last_bgc_fl_size) { float current_flr = (float) current_fl_size / (float)last_bgc_fl_size; if (current_flr < 0.4) { dprintf (BGC_TUNING_LOG, ("BTL%d h%d last fl %Id, curr fl %Id (%.3f) d1", gen_number, i, last_bgc_fl_size, current_fl_size, current_flr)); return true; } } } } return false; } void gc_heap::bgc_tuning::update_bgc_start (int gen_number, size_t num_gen1s_since_end) { int tuning_data_index = gen_number - max_generation; tuning_calculation* current_gen_calc = &gen_calc[tuning_data_index]; tuning_stats* current_gen_stats = &gen_stats[tuning_data_index]; size_t total_generation_size = get_total_generation_size (gen_number); ptrdiff_t current_bgc_fl_size = get_total_generation_fl_size (gen_number); double physical_gen_flr = (double)current_bgc_fl_size * 100.0 / (double)total_generation_size; ptrdiff_t artificial_additional_fl = 0; if (fl_tuning_triggered) { artificial_additional_fl = ((current_gen_calc->end_gen_size_goal > total_generation_size) ? (current_gen_calc->end_gen_size_goal - total_generation_size) : 0); total_generation_size += artificial_additional_fl; current_bgc_fl_size += artificial_additional_fl; } current_gen_calc->current_bgc_start_flr = (double)current_bgc_fl_size * 100.0 / (double)total_generation_size; size_t current_alloc = get_total_servo_alloc (gen_number); dprintf (BGC_TUNING_LOG, ("BTL%d: st a: %Id, la: %Id", gen_number, current_alloc, current_gen_stats->last_alloc)); current_gen_stats->last_alloc_end_to_start = current_alloc - current_gen_stats->last_alloc; current_gen_stats->last_alloc = current_alloc; current_gen_calc->actual_alloc_to_trigger = current_alloc - current_gen_calc->last_bgc_end_alloc; dprintf (BGC_TUNING_LOG, ("BTL%d: st: %Id g1s (%Id->%Id/gen1) since end, flr: %.3f(afl: %Id, %.3f)", gen_number, actual_num_gen1s_to_trigger, current_gen_stats->last_alloc_end_to_start, (num_gen1s_since_end ? 
(current_gen_stats->last_alloc_end_to_start / num_gen1s_since_end) : 0), current_gen_calc->current_bgc_start_flr, artificial_additional_fl, physical_gen_flr)); } void gc_heap::bgc_tuning::record_bgc_start() { if (!bgc_tuning::enable_fl_tuning) return; uint64_t elapsed_time_so_far = GetHighPrecisionTimeStamp() - process_start_time; // Note that younger gen's collection count is always updated with older gen's collections. // So to calcuate the actual # of gen1 occurred we really should take the # of gen2s into // account (and deduct from gen1's collection count). But right now I am using it for stats. size_t current_gen1_index = get_current_gc_index (max_generation - 1); dprintf (BGC_TUNING_LOG, ("BTL: g2t[st][g1 %Id]: %0.3f minutes", current_gen1_index, (double)elapsed_time_so_far / (double)1000000 / (double)60)); actual_num_gen1s_to_trigger = current_gen1_index - gen1_index_last_bgc_end; gen1_index_last_bgc_start = current_gen1_index; update_bgc_start (max_generation, actual_num_gen1s_to_trigger); update_bgc_start (loh_generation, actual_num_gen1s_to_trigger); } double convert_range (double lower, double upper, double num, double percentage) { double d = num - lower; if (d < 0.0) return 0.0; else { d = min ((upper - lower), d); return (d * percentage); } } double calculate_gradual_d (double delta_double, double step) { bool changed_sign = false; if (delta_double < 0.0) { delta_double = -delta_double; changed_sign = true; } double res = 0; double current_lower_limit = 0; double current_ratio = 1.0; // Given a step, we will gradually reduce the weight of the portion // in each step. // We reduce by *0.6 each time so there will be 3 iterations: // 1->0.6->0.36 (next one would be 0.216 and terminate the loop) // This will produce a result that's between 0 and 0.098. while (current_ratio > 0.22) { res += convert_range (current_lower_limit, (current_lower_limit + step), delta_double, current_ratio); current_lower_limit += step; current_ratio *= 0.6; } if (changed_sign) res = -res; return res; } void gc_heap::bgc_tuning::update_bgc_sweep_start (int gen_number, size_t num_gen1s_since_start) { int tuning_data_index = gen_number - max_generation; tuning_calculation* current_gen_calc = &gen_calc[tuning_data_index]; tuning_stats* current_gen_stats = &gen_stats[tuning_data_index]; size_t total_generation_size = 0; ptrdiff_t current_bgc_fl_size = 0; total_generation_size = get_total_generation_size (gen_number); current_bgc_fl_size = get_total_generation_fl_size (gen_number); double physical_gen_flr = (double)current_bgc_fl_size * 100.0 / (double)total_generation_size; ptrdiff_t artificial_additional_fl = 0; if (fl_tuning_triggered) { artificial_additional_fl = ((current_gen_calc->end_gen_size_goal > total_generation_size) ? (current_gen_calc->end_gen_size_goal - total_generation_size) : 0); total_generation_size += artificial_additional_fl; current_bgc_fl_size += artificial_additional_fl; } current_gen_calc->current_bgc_sweep_flr = (double)current_bgc_fl_size * 100.0 / (double)total_generation_size; size_t current_alloc = get_total_servo_alloc (gen_number); dprintf (BGC_TUNING_LOG, ("BTL%d: sw a: %Id, la: %Id", gen_number, current_alloc, current_gen_stats->last_alloc)); current_gen_stats->last_alloc_start_to_sweep = current_alloc - current_gen_stats->last_alloc; // We are resetting gen2 alloc at sweep start. 
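    // Sketch of the per-phase allocation bookkeeping kept in tuning_stats
    // (field names are the real ones used in this file):
    //
    //     last_alloc_end_to_start   : alloc between BGC end   and BGC start   (update_bgc_start)
    //     last_alloc_start_to_sweep : alloc between BGC start and sweep start (computed just above)
    //     last_alloc_sweep_to_end   : alloc between sweep     and BGC end     (calculate_tuning)
    //
    // last_alloc is zeroed below to line up with that reset of the gen2 alloc
    // counter, so the sweep->end leg in calculate_tuning() is measured from zero.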
current_gen_stats->last_alloc = 0; #ifdef SIMPLE_DPRINTF dprintf (BGC_TUNING_LOG, ("BTL%d: sflr: %.3f%%->%.3f%% (%Id->%Id, %Id->%Id) (%Id:%Id-%Id/gen1) since start (afl: %Id, %.3f)", gen_number, current_gen_calc->last_bgc_flr, current_gen_calc->current_bgc_sweep_flr, current_gen_calc->last_bgc_size, total_generation_size, current_gen_stats->last_bgc_fl_size, current_bgc_fl_size, num_gen1s_since_start, current_gen_stats->last_alloc_start_to_sweep, (num_gen1s_since_start? (current_gen_stats->last_alloc_start_to_sweep / num_gen1s_since_start) : 0), artificial_additional_fl, physical_gen_flr)); #endif //SIMPLE_DPRINTF } void gc_heap::bgc_tuning::record_bgc_sweep_start() { if (!bgc_tuning::enable_fl_tuning) return; size_t current_gen1_index = get_current_gc_index (max_generation - 1); size_t num_gen1s_since_start = current_gen1_index - gen1_index_last_bgc_start; gen1_index_last_bgc_sweep = current_gen1_index; uint64_t elapsed_time_so_far = GetHighPrecisionTimeStamp() - process_start_time; dprintf (BGC_TUNING_LOG, ("BTL: g2t[sw][g1 %Id]: %0.3f minutes", current_gen1_index, (double)elapsed_time_so_far / (double)1000000 / (double)60)); update_bgc_sweep_start (max_generation, num_gen1s_since_start); update_bgc_sweep_start (loh_generation, num_gen1s_since_start); } void gc_heap::bgc_tuning::calculate_tuning (int gen_number, bool use_this_loop_p) { BOOL use_kd_p = enable_kd; BOOL use_ki_p = enable_ki; BOOL use_smooth_p = enable_smooth; BOOL use_tbh_p = enable_tbh; BOOL use_ff_p = enable_ff; int tuning_data_index = gen_number - max_generation; tuning_calculation* current_gen_calc = &gen_calc[tuning_data_index]; tuning_stats* current_gen_stats = &gen_stats[tuning_data_index]; bgc_size_data* data = &current_bgc_end_data[tuning_data_index]; size_t total_generation_size = data->gen_size; size_t current_bgc_fl = data->gen_fl_size; size_t current_bgc_surv_size = get_total_surv_size (gen_number); size_t current_bgc_begin_data_size = get_total_begin_data_size (gen_number); // This is usually 0 unless a GC happened where we joined at the end of sweep size_t current_alloc = get_total_servo_alloc (gen_number); //dprintf (BGC_TUNING_LOG, ("BTL%d: current fl alloc: %Id, last recorded alloc: %Id, last_bgc_end_alloc: %Id", dprintf (BGC_TUNING_LOG, ("BTL%d: en a: %Id, la: %Id, lbgca: %Id", gen_number, current_alloc, current_gen_stats->last_alloc, current_gen_calc->last_bgc_end_alloc)); double current_bgc_surv_rate = (current_bgc_begin_data_size == 0) ? 
0 : ((double)current_bgc_surv_size * 100.0 / (double)current_bgc_begin_data_size); current_gen_stats->last_alloc_sweep_to_end = current_alloc - current_gen_stats->last_alloc; size_t gen1_index = get_current_gc_index (max_generation - 1); size_t gen2_index = get_current_gc_index (max_generation); size_t num_gen1s_since_sweep = gen1_index - gen1_index_last_bgc_sweep; size_t num_gen1s_bgc_end = gen1_index - gen1_index_last_bgc_end; size_t gen_end_size_goal = current_gen_calc->end_gen_size_goal; double gen_sweep_flr_goal = current_gen_calc->sweep_flr_goal; size_t last_gen_alloc_to_trigger = current_gen_calc->alloc_to_trigger; size_t gen_actual_alloc_to_trigger = current_gen_calc->actual_alloc_to_trigger; size_t last_gen_alloc_to_trigger_0 = current_gen_calc->alloc_to_trigger_0; double current_end_to_sweep_flr = current_gen_calc->last_bgc_flr - current_gen_calc->current_bgc_sweep_flr; bool current_sweep_above_p = (current_gen_calc->current_bgc_sweep_flr > gen_sweep_flr_goal); #ifdef SIMPLE_DPRINTF dprintf (BGC_TUNING_LOG, ("BTL%d: sflr: c %.3f (%s), p %s, palloc: %Id, aalloc %Id(%s)", gen_number, current_gen_calc->current_bgc_sweep_flr, (current_sweep_above_p ? "above" : "below"), (current_gen_calc->last_sweep_above_p ? "above" : "below"), last_gen_alloc_to_trigger, current_gen_calc->actual_alloc_to_trigger, (use_this_loop_p ? "this" : "last"))); dprintf (BGC_TUNING_LOG, ("BTL%d-en[g1: %Id, g2: %Id]: end fl: %Id (%Id: S-%Id, %.3f%%->%.3f%%)", gen_number, gen1_index, gen2_index, current_bgc_fl, total_generation_size, current_bgc_surv_size, current_gen_stats->last_bgc_surv_rate, current_bgc_surv_rate)); dprintf (BGC_TUNING_LOG, ("BTLS%d sflr: %.3f, end-start: %Id(%Id), start-sweep: %Id(%Id), sweep-end: %Id(%Id)", gen_number, current_gen_calc->current_bgc_sweep_flr, (gen1_index_last_bgc_start - gen1_index_last_bgc_end), current_gen_stats->last_alloc_end_to_start, (gen1_index_last_bgc_sweep - gen1_index_last_bgc_start), current_gen_stats->last_alloc_start_to_sweep, num_gen1s_since_sweep, current_gen_stats->last_alloc_sweep_to_end)); #endif //SIMPLE_DPRINTF size_t saved_alloc_to_trigger = 0; // during our calculation alloc can be negative so use double here. double current_alloc_to_trigger = 0.0; if (!fl_tuning_triggered && use_tbh_p) { current_gen_calc->alloc_to_trigger_0 = current_gen_calc->actual_alloc_to_trigger; dprintf (BGC_TUNING_LOG, ("BTL%d[g1: %Id]: not in FL tuning yet, setting alloc_to_trigger_0 to %Id", gen_number, gen1_index, current_gen_calc->alloc_to_trigger_0)); } if (fl_tuning_triggered) { BOOL tuning_kd_finished_p = FALSE; // We shouldn't have an alloc_to_trigger that's > what's consumed before sweep happens. double max_alloc_to_trigger = ((double)current_bgc_fl * (100 - gen_sweep_flr_goal) / 100.0); double min_alloc_to_trigger = (double)current_bgc_fl * 0.05; { if (current_gen_calc->current_bgc_sweep_flr < 0.0) { dprintf (BGC_TUNING_LOG, ("BTL%d: sflr is %.3f!!! < 0, make it 0", gen_number, current_gen_calc->current_bgc_sweep_flr)); current_gen_calc->current_bgc_sweep_flr = 0.0; } double adjusted_above_goal_kp = above_goal_kp; double above_goal_distance = current_gen_calc->current_bgc_sweep_flr - gen_sweep_flr_goal; if (use_ki_p) { if (current_gen_calc->above_goal_accu_error > max_alloc_to_trigger) { dprintf (BGC_TUNING_LOG, ("g%d: ae TB! %.1f->%.1f", gen_number, current_gen_calc->above_goal_accu_error, max_alloc_to_trigger)); } else if (current_gen_calc->above_goal_accu_error < min_alloc_to_trigger) { dprintf (BGC_TUNING_LOG, ("g%d: ae TS! 
%.1f->%.1f", gen_number, current_gen_calc->above_goal_accu_error, min_alloc_to_trigger)); } current_gen_calc->above_goal_accu_error = min (max_alloc_to_trigger, current_gen_calc->above_goal_accu_error); current_gen_calc->above_goal_accu_error = max (min_alloc_to_trigger, current_gen_calc->above_goal_accu_error); double above_goal_ki_gain = above_goal_ki * above_goal_distance * current_bgc_fl; double temp_accu_error = current_gen_calc->above_goal_accu_error + above_goal_ki_gain; // anti-windup if ((temp_accu_error > min_alloc_to_trigger) && (temp_accu_error < max_alloc_to_trigger)) { current_gen_calc->above_goal_accu_error = temp_accu_error; } else { //dprintf (BGC_TUNING_LOG, ("alloc accu err + %.1f=%.1f, exc", dprintf (BGC_TUNING_LOG, ("g%d: aae + %.1f=%.1f, exc", gen_number, above_goal_ki_gain, temp_accu_error)); } } // First we do the PI loop. { saved_alloc_to_trigger = current_gen_calc->alloc_to_trigger; current_alloc_to_trigger = adjusted_above_goal_kp * above_goal_distance * current_bgc_fl; // la is last alloc_to_trigger, +%Id is the diff between la and the new alloc. // laa is the last actual alloc (gen_actual_alloc_to_trigger), +%Id is the diff between la and laa. dprintf (BGC_TUNING_LOG, ("BTL%d: sflr %.3f above * %.4f * %Id = %Id bytes in alloc, la: %Id(+%Id), laa: %Id(+%Id)", gen_number, (current_gen_calc->current_bgc_sweep_flr - (double)gen_sweep_flr_goal), adjusted_above_goal_kp, current_bgc_fl, (size_t)current_alloc_to_trigger, saved_alloc_to_trigger, (size_t)(current_alloc_to_trigger - (double)saved_alloc_to_trigger), gen_actual_alloc_to_trigger, (gen_actual_alloc_to_trigger - saved_alloc_to_trigger))); if (use_ki_p) { current_alloc_to_trigger += current_gen_calc->above_goal_accu_error; dprintf (BGC_TUNING_LOG, ("BTL%d: +accu err %Id=%Id", gen_number, (size_t)(current_gen_calc->above_goal_accu_error), (size_t)current_alloc_to_trigger)); } } if (use_tbh_p) { if (current_gen_calc->last_sweep_above_p != current_sweep_above_p) { size_t new_alloc_to_trigger_0 = (last_gen_alloc_to_trigger + last_gen_alloc_to_trigger_0) / 2; dprintf (BGC_TUNING_LOG, ("BTL%d: tbh crossed SP, setting both to %Id", gen_number, new_alloc_to_trigger_0)); current_gen_calc->alloc_to_trigger_0 = new_alloc_to_trigger_0; current_gen_calc->alloc_to_trigger = new_alloc_to_trigger_0; } tuning_kd_finished_p = TRUE; } } if (!tuning_kd_finished_p) { if (use_kd_p) { saved_alloc_to_trigger = last_gen_alloc_to_trigger; size_t alloc_delta = saved_alloc_to_trigger - gen_actual_alloc_to_trigger; double adjust_ratio = (double)alloc_delta / (double)gen_actual_alloc_to_trigger; double saved_adjust_ratio = adjust_ratio; if (enable_gradual_d) { adjust_ratio = calculate_gradual_d (adjust_ratio, above_goal_kd); dprintf (BGC_TUNING_LOG, ("BTL%d: gradual kd - reduced from %.3f to %.3f", gen_number, saved_adjust_ratio, adjust_ratio)); } else { double kd = above_goal_kd; double neg_kd = 0 - kd; if (adjust_ratio > kd) adjust_ratio = kd; if (adjust_ratio < neg_kd) adjust_ratio = neg_kd; dprintf (BGC_TUNING_LOG, ("BTL%d: kd - reduced from %.3f to %.3f", gen_number, saved_adjust_ratio, adjust_ratio)); } current_gen_calc->alloc_to_trigger = (size_t)((double)gen_actual_alloc_to_trigger * (1 + adjust_ratio)); dprintf (BGC_TUNING_LOG, ("BTL%d: kd %.3f, reduced it to %.3f * %Id, adjust %Id->%Id", gen_number, saved_adjust_ratio, adjust_ratio, gen_actual_alloc_to_trigger, saved_alloc_to_trigger, current_gen_calc->alloc_to_trigger)); } if (use_smooth_p && use_this_loop_p) { saved_alloc_to_trigger = current_gen_calc->alloc_to_trigger; size_t 
gen_smoothed_alloc_to_trigger = current_gen_calc->smoothed_alloc_to_trigger; double current_num_gen1s_smooth_factor = (num_gen1s_smooth_factor > (double)num_bgcs_since_tuning_trigger) ? (double)num_bgcs_since_tuning_trigger : num_gen1s_smooth_factor; current_gen_calc->smoothed_alloc_to_trigger = (size_t)((double)saved_alloc_to_trigger / current_num_gen1s_smooth_factor + ((double)gen_smoothed_alloc_to_trigger / current_num_gen1s_smooth_factor) * (current_num_gen1s_smooth_factor - 1.0)); dprintf (BGC_TUNING_LOG, ("BTL%d: smoothed %Id / %.3f + %Id / %.3f * %.3f adjust %Id->%Id", gen_number, saved_alloc_to_trigger, current_num_gen1s_smooth_factor, gen_smoothed_alloc_to_trigger, current_num_gen1s_smooth_factor, (current_num_gen1s_smooth_factor - 1.0), saved_alloc_to_trigger, current_gen_calc->smoothed_alloc_to_trigger)); current_gen_calc->alloc_to_trigger = current_gen_calc->smoothed_alloc_to_trigger; } } if (use_ff_p) { double next_end_to_sweep_flr = data->gen_flr - gen_sweep_flr_goal; if (next_end_to_sweep_flr > 0.0) { saved_alloc_to_trigger = current_gen_calc->alloc_to_trigger; double ff_ratio = next_end_to_sweep_flr / current_end_to_sweep_flr - 1; if (use_this_loop_p) { // if we adjust down we want ff to be bigger, so the alloc will be even smaller; // if we adjust up want ff to be smaller, so the alloc will also be smaller; // the idea is we want to be slower at increase than decrease double ff_step = above_goal_ff * 0.5; double adjusted_above_goal_ff = above_goal_ff; if (ff_ratio > 0) adjusted_above_goal_ff -= ff_step; else adjusted_above_goal_ff += ff_step; double adjusted_ff_ratio = ff_ratio * adjusted_above_goal_ff; current_gen_calc->alloc_to_trigger = saved_alloc_to_trigger + (size_t)((double)saved_alloc_to_trigger * adjusted_ff_ratio); dprintf (BGC_TUNING_LOG, ("BTL%d: ff (%.3f / %.3f - 1) * %.3f = %.3f adjust %Id->%Id", gen_number, next_end_to_sweep_flr, current_end_to_sweep_flr, adjusted_above_goal_ff, adjusted_ff_ratio, saved_alloc_to_trigger, current_gen_calc->alloc_to_trigger)); } } } if (use_this_loop_p) { // apply low/high caps. if (current_alloc_to_trigger > max_alloc_to_trigger) { dprintf (BGC_TUNING_LOG, ("BTL%d: TB! %.1f -> %.1f", gen_number, current_alloc_to_trigger, max_alloc_to_trigger)); current_alloc_to_trigger = max_alloc_to_trigger; } if (current_alloc_to_trigger < min_alloc_to_trigger) { dprintf (BGC_TUNING_LOG, ("BTL%d: TS! %Id -> %Id", gen_number, (ptrdiff_t)current_alloc_to_trigger, (size_t)min_alloc_to_trigger)); current_alloc_to_trigger = min_alloc_to_trigger; } current_gen_calc->alloc_to_trigger = (size_t)current_alloc_to_trigger; } else { // we can't do the above comparison - we could be in the situation where // we haven't done any alloc. dprintf (BGC_TUNING_LOG, ("BTL%d: ag, revert %Id->%Id", gen_number, current_gen_calc->alloc_to_trigger, last_gen_alloc_to_trigger)); current_gen_calc->alloc_to_trigger = last_gen_alloc_to_trigger; } } // This is only executed once to get the tuning started. 
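    // Worked example with hypothetical numbers: if the pre-tuning phase recorded
    // first_alloc_to_trigger = 100MB and the BGC-end free list is 400MB, then
    //
    //     first_alloc      = 0.75 * 100MB     = 75MB
    //     min_first_alloc  = 400MB / 20       = 20MB  (5% of the FL)
    //     alloc_to_trigger = max (75MB, 20MB) = 75MB
    //
    // so the 5%-of-FL floor only matters when the initial sample was unusually
    // small.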
if (next_bgc_p) { size_t first_alloc = (size_t)((double)current_gen_calc->first_alloc_to_trigger * 0.75); // The initial conditions can be quite erratic so check to see if the first alloc we set was reasonable - take 5% of the FL size_t min_first_alloc = current_bgc_fl / 20; current_gen_calc->alloc_to_trigger = max (first_alloc, min_first_alloc); dprintf (BGC_TUNING_LOG, ("BTL%d[g1: %Id]: BGC end, trigger FL, set gen%d alloc to max (0.75 of first: %Id, 5%% fl: %Id), actual alloc: %Id", gen_number, gen1_index, gen_number, first_alloc, min_first_alloc, current_gen_calc->actual_alloc_to_trigger)); } dprintf (BGC_TUNING_LOG, ("BTL%d* %Id, %.3f, %.3f, %.3f, %.3f, %.3f, %Id, %Id, %Id, %Id", gen_number, total_generation_size, current_gen_calc->current_bgc_start_flr, current_gen_calc->current_bgc_sweep_flr, current_bgc_end_data[tuning_data_index].gen_flr, current_gen_stats->last_gen_increase_flr, current_bgc_surv_rate, actual_num_gen1s_to_trigger, num_gen1s_bgc_end, gen_actual_alloc_to_trigger, current_gen_calc->alloc_to_trigger)); gen1_index_last_bgc_end = gen1_index; current_gen_calc->last_bgc_size = total_generation_size; current_gen_calc->last_bgc_flr = current_bgc_end_data[tuning_data_index].gen_flr; current_gen_calc->last_sweep_above_p = current_sweep_above_p; current_gen_calc->last_bgc_end_alloc = current_alloc; current_gen_stats->last_bgc_physical_size = data->gen_physical_size; current_gen_stats->last_alloc_end_to_start = 0; current_gen_stats->last_alloc_start_to_sweep = 0; current_gen_stats->last_alloc_sweep_to_end = 0; current_gen_stats->last_alloc = current_alloc; current_gen_stats->last_bgc_fl_size = current_bgc_end_data[tuning_data_index].gen_fl_size; current_gen_stats->last_bgc_surv_rate = current_bgc_surv_rate; current_gen_stats->last_gen_increase_flr = 0; } // Note that in this method for the !use_this_loop_p generation we will adjust // its sweep_flr accordingly. And the inner loop will not need to know about this. void gc_heap::bgc_tuning::init_bgc_end_data (int gen_number, bool use_this_loop_p) { int index = gen_number - max_generation; bgc_size_data* data = &current_bgc_end_data[index]; size_t physical_size = get_total_generation_size (gen_number); ptrdiff_t physical_fl_size = get_total_generation_fl_size (gen_number); data->gen_actual_phys_fl_size = physical_fl_size; if (fl_tuning_triggered && !use_this_loop_p) { tuning_calculation* current_gen_calc = &gen_calc[gen_number - max_generation]; if (current_gen_calc->actual_alloc_to_trigger > current_gen_calc->alloc_to_trigger) { dprintf (BGC_TUNING_LOG, ("BTL%d: gen alloc also exceeded %Id (la: %Id), no action", gen_number, current_gen_calc->actual_alloc_to_trigger, current_gen_calc->alloc_to_trigger)); } else { // We will deduct the missing portion from alloc to fl, simulating that we consumed it. size_t remaining_alloc = current_gen_calc->alloc_to_trigger - current_gen_calc->actual_alloc_to_trigger; // now re-calc current_bgc_sweep_flr // TODO: note that I am assuming the physical size at sweep was <= end_gen_size_goal which // not have been the case. size_t gen_size = current_gen_calc->end_gen_size_goal; double sweep_flr = current_gen_calc->current_bgc_sweep_flr; size_t sweep_fl_size = (size_t)((double)gen_size * sweep_flr / 100.0); if (sweep_fl_size < remaining_alloc) { dprintf (BGC_TUNING_LOG, ("BTL%d: sweep fl %Id < remain alloc %Id", gen_number, sweep_fl_size, remaining_alloc)); // TODO: this is saying that we didn't have enough fl to accommodate the // remaining alloc which is suspicious. 
To set remaining_alloc to // something slightly smaller is only so that we could continue with // our calculation but this is something we should look into. remaining_alloc = sweep_fl_size - (10 * 1024); } size_t new_sweep_fl_size = sweep_fl_size - remaining_alloc; ptrdiff_t signed_new_sweep_fl_size = sweep_fl_size - remaining_alloc; double new_current_bgc_sweep_flr = (double)new_sweep_fl_size * 100.0 / (double)gen_size; double signed_new_current_bgc_sweep_flr = (double)signed_new_sweep_fl_size * 100.0 / (double)gen_size; dprintf (BGC_TUNING_LOG, ("BTL%d: sg: %Id(%Id), sfl: %Id->%Id(%Id)(%.3f->%.3f(%.3f)), la: %Id, aa: %Id", gen_number, gen_size, physical_size, sweep_fl_size, new_sweep_fl_size, signed_new_sweep_fl_size, sweep_flr, new_current_bgc_sweep_flr, signed_new_current_bgc_sweep_flr, current_gen_calc->alloc_to_trigger, current_gen_calc->actual_alloc_to_trigger)); current_gen_calc->actual_alloc_to_trigger = current_gen_calc->alloc_to_trigger; current_gen_calc->current_bgc_sweep_flr = new_current_bgc_sweep_flr; // TODO: NOTE this is duplicated in calculate_tuning except I am not * 100.0 here. size_t current_bgc_surv_size = get_total_surv_size (gen_number); size_t current_bgc_begin_data_size = get_total_begin_data_size (gen_number); double current_bgc_surv_rate = (current_bgc_begin_data_size == 0) ? 0 : ((double)current_bgc_surv_size / (double)current_bgc_begin_data_size); size_t remaining_alloc_surv = (size_t)((double)remaining_alloc * current_bgc_surv_rate); physical_fl_size -= remaining_alloc_surv; dprintf (BGC_TUNING_LOG, ("BTL%d: asfl %Id-%Id=%Id, flr %.3f->%.3f, %.3f%% s, fl %Id-%Id->%Id", gen_number, sweep_fl_size, remaining_alloc, new_sweep_fl_size, sweep_flr, current_gen_calc->current_bgc_sweep_flr, (current_bgc_surv_rate * 100.0), (physical_fl_size + remaining_alloc_surv), remaining_alloc_surv, physical_fl_size)); } } double physical_gen_flr = (double)physical_fl_size * 100.0 / (double)physical_size; data->gen_physical_size = physical_size; data->gen_physical_fl_size = physical_fl_size; data->gen_physical_flr = physical_gen_flr; } void gc_heap::bgc_tuning::calc_end_bgc_fl (int gen_number) { int index = gen_number - max_generation; bgc_size_data* data = &current_bgc_end_data[index]; tuning_calculation* current_gen_calc = &gen_calc[gen_number - max_generation]; size_t virtual_size = current_gen_calc->end_gen_size_goal; size_t physical_size = data->gen_physical_size; ptrdiff_t physical_fl_size = data->gen_physical_fl_size; ptrdiff_t virtual_fl_size = (ptrdiff_t)virtual_size - (ptrdiff_t)physical_size; ptrdiff_t end_gen_fl_size = physical_fl_size + virtual_fl_size; if (end_gen_fl_size < 0) { end_gen_fl_size = 0; } data->gen_size = virtual_size; data->gen_fl_size = end_gen_fl_size; data->gen_flr = (double)(data->gen_fl_size) * 100.0 / (double)(data->gen_size); dprintf (BGC_TUNING_LOG, ("BTL%d: vfl: %Id, size %Id->%Id, fl %Id->%Id, flr %.3f->%.3f", gen_number, virtual_fl_size, data->gen_physical_size, data->gen_size, data->gen_physical_fl_size, data->gen_fl_size, data->gen_physical_flr, data->gen_flr)); } // reduce_p is for NGC2s. we want to reduce the ki so we don't overshoot. 
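// At a high level the routine below is a PI controller (minimal sketch;
// clamping, anti-windup and the panic accounting are elided - ml_kp and ml_ki
// are the constants it actually uses):
//
//     error       = current_available_physical - available_memory_goal;
//     accu_error += ml_ki * error;               // I term, only fed when the error ratio is large enough
//     vfl         = ml_kp * error + accu_error;  // P term + I term
//
// vfl (the total virtual free-list size) is then limited to [0, max_output]
// and handed back to set_total_gen_sizes(), which splits it between gen2 and
// gen3.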
double gc_heap::bgc_tuning::calculate_ml_tuning (uint64_t current_available_physical, bool reduce_p, ptrdiff_t* _vfl_from_kp, ptrdiff_t* _vfl_from_ki) { ptrdiff_t error = (ptrdiff_t)(current_available_physical - available_memory_goal); // This is questionable as gen0/1 and other processes are consuming memory // too size_t gen2_physical_size = current_bgc_end_data[0].gen_physical_size; size_t gen3_physical_size = current_bgc_end_data[1].gen_physical_size; double max_output = (double)(total_physical_mem - available_memory_goal - gen2_physical_size - gen3_physical_size); double error_ratio = (double)error / (double)total_physical_mem; // do we want this to contribute to the integral term? bool include_in_i_p = ((error_ratio > 0.005) || (error_ratio < -0.005)); dprintf (BGC_TUNING_LOG, ("total phy %Id, mem goal: %Id, curr phy: %Id, g2 phy: %Id, g3 phy: %Id", (size_t)total_physical_mem, (size_t)available_memory_goal, (size_t)current_available_physical, gen2_physical_size, gen3_physical_size)); dprintf (BGC_TUNING_LOG, ("BTL: Max output: %Id, ER %Id / %Id = %.3f, %s", (size_t)max_output, error, available_memory_goal, error_ratio, (include_in_i_p ? "inc" : "exc"))); if (include_in_i_p) { double error_ki = ml_ki * (double)error; double temp_accu_error = accu_error + error_ki; // anti-windup if ((temp_accu_error > 0) && (temp_accu_error < max_output)) accu_error = temp_accu_error; else { //dprintf (BGC_TUNING_LOG, ("ml accu err + %Id=%Id, exc", dprintf (BGC_TUNING_LOG, ("mae + %Id=%Id, exc", (size_t)error_ki, (size_t)temp_accu_error)); } } if (reduce_p) { double saved_accu_error = accu_error; accu_error = accu_error * 2.0 / 3.0; panic_activated_p = false; accu_error_panic = 0; dprintf (BGC_TUNING_LOG, ("BTL reduced accu ki %Id->%Id", (ptrdiff_t)saved_accu_error, (ptrdiff_t)accu_error)); } if (panic_activated_p) accu_error_panic += (double)error; else accu_error_panic = 0.0; double vfl_from_kp = (double)error * ml_kp; double total_virtual_fl_size = vfl_from_kp + accu_error; // limit output if (total_virtual_fl_size < 0) { dprintf (BGC_TUNING_LOG, ("BTL vfl %Id < 0", (size_t)total_virtual_fl_size)); total_virtual_fl_size = 0; } else if (total_virtual_fl_size > max_output) { dprintf (BGC_TUNING_LOG, ("BTL vfl %Id > max", (size_t)total_virtual_fl_size)); total_virtual_fl_size = max_output; } *_vfl_from_kp = (ptrdiff_t)vfl_from_kp; *_vfl_from_ki = (ptrdiff_t)accu_error; return total_virtual_fl_size; } void gc_heap::bgc_tuning::set_total_gen_sizes (bool use_gen2_loop_p, bool use_gen3_loop_p) { size_t gen2_physical_size = current_bgc_end_data[0].gen_physical_size; size_t gen3_physical_size = 0; ptrdiff_t gen3_virtual_fl_size = 0; gen3_physical_size = current_bgc_end_data[1].gen_physical_size; double gen2_size_ratio = (double)gen2_physical_size / ((double)gen2_physical_size + (double)gen3_physical_size); // We know how far we are from the memory load goal, assuming that the memory is only // used by gen2/3 (which is obviously not the case, but that's why we are not setting the // memory goal at 90+%. Assign the memory proportionally to them. // // We use entry memory load info because that seems to be more closedly correlated to what the VMM decides // in memory load. 
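    // Worked example with hypothetical sizes, assuming neither fl servo loop
    // requested a ratio correction this BGC: with gen2_physical_size = 300MB and
    // gen3_physical_size = 100MB, gen2_size_ratio = 300 / (300 + 100) = 0.75, so
    // a total virtual FL of 200MB from calculate_ml_tuning() is split further
    // down into roughly 150MB of virtual FL for gen2 and 50MB for gen3.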
uint32_t current_memory_load = settings.entry_memory_load; uint64_t current_available_physical = settings.entry_available_physical_mem; panic_activated_p = (current_memory_load >= (memory_load_goal + memory_load_goal_slack)); if (panic_activated_p) { dprintf (BGC_TUNING_LOG, ("BTL: exceeded slack %Id >= (%Id + %Id)", (size_t)current_memory_load, (size_t)memory_load_goal, (size_t)memory_load_goal_slack)); } ptrdiff_t vfl_from_kp = 0; ptrdiff_t vfl_from_ki = 0; double total_virtual_fl_size = calculate_ml_tuning (current_available_physical, false, &vfl_from_kp, &vfl_from_ki); if (use_gen2_loop_p || use_gen3_loop_p) { if (use_gen2_loop_p) { gen2_ratio_correction += ratio_correction_step; } else { gen2_ratio_correction -= ratio_correction_step; } dprintf (BGC_TUNING_LOG, ("BTL: rc: g2 ratio %.3f%% + %d%% = %.3f%%", (gen2_size_ratio * 100.0), (int)(gen2_ratio_correction * 100.0), ((gen2_size_ratio + gen2_ratio_correction) * 100.0))); gen2_ratio_correction = min (0.99, gen2_ratio_correction); gen2_ratio_correction = max (-0.99, gen2_ratio_correction); dprintf (BGC_TUNING_LOG, ("BTL: rc again: g2 ratio %.3f%% + %d%% = %.3f%%", (gen2_size_ratio * 100.0), (int)(gen2_ratio_correction * 100.0), ((gen2_size_ratio + gen2_ratio_correction) * 100.0))); gen2_size_ratio += gen2_ratio_correction; if (gen2_size_ratio <= 0.0) { gen2_size_ratio = 0.01; dprintf (BGC_TUNING_LOG, ("BTL: rc: g2 ratio->0.01")); } if (gen2_size_ratio >= 1.0) { gen2_size_ratio = 0.99; dprintf (BGC_TUNING_LOG, ("BTL: rc: g2 ratio->0.99")); } } ptrdiff_t gen2_virtual_fl_size = (ptrdiff_t)(total_virtual_fl_size * gen2_size_ratio); gen3_virtual_fl_size = (ptrdiff_t)(total_virtual_fl_size * (1.0 - gen2_size_ratio)); if (gen2_virtual_fl_size < 0) { ptrdiff_t saved_gen2_virtual_fl_size = gen2_virtual_fl_size; ptrdiff_t half_gen2_physical_size = (ptrdiff_t)((double)gen2_physical_size * 0.5); if (-gen2_virtual_fl_size > half_gen2_physical_size) { gen2_virtual_fl_size = -half_gen2_physical_size; } dprintf (BGC_TUNING_LOG, ("BTL2: n_vfl %Id(%Id)->%Id", saved_gen2_virtual_fl_size, half_gen2_physical_size, gen2_virtual_fl_size)); gen2_virtual_fl_size = 0; } if (gen3_virtual_fl_size < 0) { ptrdiff_t saved_gen3_virtual_fl_size = gen3_virtual_fl_size; ptrdiff_t half_gen3_physical_size = (ptrdiff_t)((double)gen3_physical_size * 0.5); if (-gen3_virtual_fl_size > half_gen3_physical_size) { gen3_virtual_fl_size = -half_gen3_physical_size; } dprintf (BGC_TUNING_LOG, ("BTL3: n_vfl %Id(%Id)->%Id", saved_gen3_virtual_fl_size, half_gen3_physical_size, gen3_virtual_fl_size)); gen3_virtual_fl_size = 0; } gen_calc[0].end_gen_size_goal = gen2_physical_size + gen2_virtual_fl_size; gen_calc[1].end_gen_size_goal = gen3_physical_size + gen3_virtual_fl_size; // We calculate the end info here because the ff in fl servo loop is using this. calc_end_bgc_fl (max_generation); calc_end_bgc_fl (loh_generation); #ifdef SIMPLE_DPRINTF dprintf (BGC_TUNING_LOG, ("BTL: ml: %d (g: %d)(%s), a: %I64d (g: %I64d, elg: %Id+%Id=%Id, %Id+%Id=%Id, pi=%Id), vfl: %Id=%Id+%Id", current_memory_load, memory_load_goal, ((current_available_physical > available_memory_goal) ? 
"above" : "below"), current_available_physical, available_memory_goal, gen2_physical_size, gen2_virtual_fl_size, gen_calc[0].end_gen_size_goal, gen3_physical_size, gen3_virtual_fl_size, gen_calc[1].end_gen_size_goal, (ptrdiff_t)accu_error_panic, (ptrdiff_t)total_virtual_fl_size, vfl_from_kp, vfl_from_ki)); #endif //SIMPLE_DPRINTF } bool gc_heap::bgc_tuning::should_trigger_ngc2() { return panic_activated_p; } // This is our outer ml servo loop where we calculate the control for the inner fl servo loop. void gc_heap::bgc_tuning::convert_to_fl (bool use_gen2_loop_p, bool use_gen3_loop_p) { size_t current_bgc_count = full_gc_counts[gc_type_background]; #ifdef MULTIPLE_HEAPS for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; hp->bgc_maxgen_end_fl_size = generation_free_list_space (hp->generation_of (max_generation)); } #else bgc_maxgen_end_fl_size = generation_free_list_space (generation_of (max_generation)); #endif //MULTIPLE_HEAPS init_bgc_end_data (max_generation, use_gen2_loop_p); init_bgc_end_data (loh_generation, use_gen3_loop_p); set_total_gen_sizes (use_gen2_loop_p, use_gen3_loop_p); dprintf (BGC_TUNING_LOG, ("BTL: gen2 %Id, fl %Id(%.3f)->%Id; gen3 %Id, fl %Id(%.3f)->%Id, %Id BGCs", current_bgc_end_data[0].gen_size, current_bgc_end_data[0].gen_fl_size, current_bgc_end_data[0].gen_flr, gen_calc[0].end_gen_size_goal, current_bgc_end_data[1].gen_size, current_bgc_end_data[1].gen_fl_size, current_bgc_end_data[1].gen_flr, gen_calc[1].end_gen_size_goal, current_bgc_count)); } void gc_heap::bgc_tuning::record_and_adjust_bgc_end() { if (!bgc_tuning::enable_fl_tuning) return; uint64_t elapsed_time_so_far = GetHighPrecisionTimeStamp() - process_start_time; size_t current_gen1_index = get_current_gc_index (max_generation - 1); dprintf (BGC_TUNING_LOG, ("BTL: g2t[en][g1 %Id]: %0.3f minutes", current_gen1_index, (double)elapsed_time_so_far / (double)1000000 / (double)60)); if (fl_tuning_triggered) { num_bgcs_since_tuning_trigger++; } bool use_gen2_loop_p = (settings.reason == reason_bgc_tuning_soh); bool use_gen3_loop_p = (settings.reason == reason_bgc_tuning_loh); dprintf (BGC_TUNING_LOG, ("BTL: reason: %d, gen2 loop: %s; gen3 loop: %s, promoted %Id bytes", (((settings.reason != reason_bgc_tuning_soh) && (settings.reason != reason_bgc_tuning_loh)) ? saved_bgc_tuning_reason : settings.reason), (use_gen2_loop_p ? "yes" : "no"), (use_gen3_loop_p ? "yes" : "no"), get_total_bgc_promoted())); convert_to_fl (use_gen2_loop_p, use_gen3_loop_p); calculate_tuning (max_generation, true); if (total_loh_a_last_bgc > 0) { calculate_tuning (loh_generation, true); } else { dprintf (BGC_TUNING_LOG, ("BTL: gen3 not allocated")); } if (next_bgc_p) { next_bgc_p = false; fl_tuning_triggered = true; dprintf (BGC_TUNING_LOG, ("BTL: FL tuning ENABLED!!!")); } saved_bgc_tuning_reason = -1; } #endif //BGC_SERVO_TUNING #endif //BACKGROUND_GC //Clear the cards [start_card, end_card[ void gc_heap::clear_cards (size_t start_card, size_t end_card) { if (start_card < end_card) { size_t start_word = card_word (start_card); size_t end_word = card_word (end_card); if (start_word < end_word) { // Figure out the bit positions of the cards within their words unsigned bits = card_bit (start_card); card_table [start_word] &= lowbits (~0, bits); for (size_t i = start_word+1; i < end_word; i++) card_table [i] = 0; bits = card_bit (end_card); // Don't write beyond end_card (and possibly uncommitted card table space). 
if (bits != 0) { card_table [end_word] &= highbits (~0, bits); } } else { // If the start and end cards are in the same word, just clear the appropriate card // bits in that word. card_table [start_word] &= (lowbits (~0, card_bit (start_card)) | highbits (~0, card_bit (end_card))); } #if defined(_DEBUG) && defined(VERIFY_HEAP) if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) { size_t card = start_card; while (card < end_card) { assert (!(card_set_p (card))); card++; } } #endif //_DEBUG && VERIFY_HEAP dprintf (3,("Cleared cards [%Ix:%Ix, %Ix:%Ix[", start_card, (size_t)card_address (start_card), end_card, (size_t)card_address (end_card))); } } void gc_heap::clear_card_for_addresses (uint8_t* start_address, uint8_t* end_address) { size_t start_card = card_of (align_on_card (start_address)); size_t end_card = card_of (align_lower_card (end_address)); clear_cards (start_card, end_card); } // copy [srccard, ...[ to [dst_card, end_card[ // This will set the same bit twice. Can be optimized. inline void gc_heap::copy_cards (size_t dst_card, size_t src_card, size_t end_card, BOOL nextp) { // If the range is empty, this function is a no-op - with the subtlety that // either of the accesses card_table[srcwrd] or card_table[dstwrd] could be // outside the committed region. To avoid the access, leave early. if (!(dst_card < end_card)) return; unsigned int srcbit = card_bit (src_card); unsigned int dstbit = card_bit (dst_card); size_t srcwrd = card_word (src_card); size_t dstwrd = card_word (dst_card); unsigned int srctmp = card_table[srcwrd]; unsigned int dsttmp = card_table[dstwrd]; for (size_t card = dst_card; card < end_card; card++) { if (srctmp & (1 << srcbit)) dsttmp |= 1 << dstbit; else dsttmp &= ~(1 << dstbit); if (!(++srcbit % 32)) { srctmp = card_table[++srcwrd]; srcbit = 0; } if (nextp) { if (srctmp & (1 << srcbit)) dsttmp |= 1 << dstbit; } if (!(++dstbit % 32)) { card_table[dstwrd] = dsttmp; #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES if (dsttmp != 0) { card_bundle_set(cardw_card_bundle(dstwrd)); } #endif dstwrd++; dsttmp = card_table[dstwrd]; dstbit = 0; } } card_table[dstwrd] = dsttmp; #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES if (dsttmp != 0) { card_bundle_set(cardw_card_bundle(dstwrd)); } #endif } void gc_heap::copy_cards_for_addresses (uint8_t* dest, uint8_t* src, size_t len) { ptrdiff_t relocation_distance = src - dest; size_t start_dest_card = card_of (align_on_card (dest)); size_t end_dest_card = card_of (dest + len - 1); size_t dest_card = start_dest_card; size_t src_card = card_of (card_address (dest_card)+relocation_distance); dprintf (3,("Copying cards [%Ix:%Ix->%Ix:%Ix, ", src_card, (size_t)src, dest_card, (size_t)dest)); dprintf (3,(" %Ix->%Ix:%Ix[", (size_t)src+len, end_dest_card, (size_t)dest+len)); dprintf (3, ("dest: %Ix, src: %Ix, len: %Ix, reloc: %Ix, align_on_card(dest) is %Ix", dest, src, len, relocation_distance, (align_on_card (dest)))); dprintf (3, ("start_dest_card: %Ix (address: %Ix), end_dest_card: %Ix(addr: %Ix), card_of (dest): %Ix", start_dest_card, card_address (start_dest_card), end_dest_card, card_address (end_dest_card), card_of (dest))); //First card has two boundaries if (start_dest_card != card_of (dest)) { if ((card_of (card_address (start_dest_card) + relocation_distance) <= card_of (src + len - 1))&& card_set_p (card_of (card_address (start_dest_card) + relocation_distance))) { dprintf (3, ("card_address (start_dest_card) + reloc is %Ix, card: %Ix(set), src+len-1: %Ix, card: %Ix", (card_address (start_dest_card) + 
relocation_distance), card_of (card_address (start_dest_card) + relocation_distance), (src + len - 1), card_of (src + len - 1))); dprintf (3, ("setting card: %Ix", card_of (dest))); set_card (card_of (dest)); } } if (card_set_p (card_of (src))) set_card (card_of (dest)); copy_cards (dest_card, src_card, end_dest_card, ((dest - align_lower_card (dest)) != (src - align_lower_card (src)))); //Last card has two boundaries. if ((card_of (card_address (end_dest_card) + relocation_distance) >= card_of (src)) && card_set_p (card_of (card_address (end_dest_card) + relocation_distance))) { dprintf (3, ("card_address (end_dest_card) + reloc is %Ix, card: %Ix(set), src: %Ix, card: %Ix", (card_address (end_dest_card) + relocation_distance), card_of (card_address (end_dest_card) + relocation_distance), src, card_of (src))); dprintf (3, ("setting card: %Ix", end_dest_card)); set_card (end_dest_card); } if (card_set_p (card_of (src + len - 1))) set_card (end_dest_card); #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES card_bundles_set(cardw_card_bundle(card_word(card_of(dest))), cardw_card_bundle(align_cardw_on_bundle(card_word(end_dest_card)))); #endif } #ifdef BACKGROUND_GC // this does not need the Interlocked version of mark_array_set_marked. void gc_heap::copy_mark_bits_for_addresses (uint8_t* dest, uint8_t* src, size_t len) { dprintf (3, ("Copying mark_bits for addresses [%Ix->%Ix, %Ix->%Ix[", (size_t)src, (size_t)dest, (size_t)src+len, (size_t)dest+len)); uint8_t* src_o = src; uint8_t* dest_o; uint8_t* src_end = src + len; int align_const = get_alignment_constant (TRUE); ptrdiff_t reloc = dest - src; while (src_o < src_end) { uint8_t* next_o = src_o + Align (size (src_o), align_const); if (background_object_marked (src_o, TRUE)) { dest_o = src_o + reloc; background_mark (dest_o, background_saved_lowest_address, background_saved_highest_address); dprintf (3, ("bc*%Ix*bc, b*%Ix*b", (size_t)src_o, (size_t)(dest_o))); } src_o = next_o; } } #endif //BACKGROUND_GC void gc_heap::fix_brick_to_highest (uint8_t* o, uint8_t* next_o) { size_t new_current_brick = brick_of (o); set_brick (new_current_brick, (o - brick_address (new_current_brick))); size_t b = 1 + new_current_brick; size_t limit = brick_of (next_o); //dprintf(3,(" fixing brick %Ix to point to object %Ix, till %Ix(%Ix)", dprintf(3,("b:%Ix->%Ix-%Ix", new_current_brick, (size_t)o, (size_t)next_o)); while (b < limit) { set_brick (b,(new_current_brick - b)); b++; } } // start can not be >= heap_segment_allocated for the segment. uint8_t* gc_heap::find_first_object (uint8_t* start, uint8_t* first_object) { size_t brick = brick_of (start); uint8_t* o = 0; //last_object == null -> no search shortcut needed if ((brick == brick_of (first_object) || (start <= first_object))) { o = first_object; } else { ptrdiff_t min_brick = (ptrdiff_t)brick_of (first_object); ptrdiff_t prev_brick = (ptrdiff_t)brick - 1; int brick_entry = 0; while (1) { if (prev_brick < min_brick) { break; } if ((brick_entry = get_brick_entry(prev_brick)) >= 0) { break; } assert (! ((brick_entry == 0))); prev_brick = (brick_entry + prev_brick); } o = ((prev_brick < min_brick) ? 
first_object : brick_address (prev_brick) + brick_entry - 1); assert (o <= start); } assert (Align (size (o)) >= Align (min_obj_size)); uint8_t* next_o = o + Align (size (o)); size_t curr_cl = (size_t)next_o / brick_size; size_t min_cl = (size_t)first_object / brick_size; #ifdef TRACE_GC unsigned int n_o = 1; #endif //TRACE_GC uint8_t* next_b = min (align_lower_brick (next_o) + brick_size, start+1); while (next_o <= start) { do { #ifdef TRACE_GC n_o++; #endif //TRACE_GC o = next_o; assert (Align (size (o)) >= Align (min_obj_size)); next_o = o + Align (size (o)); Prefetch (next_o); }while (next_o < next_b); if (((size_t)next_o / brick_size) != curr_cl) { if (curr_cl >= min_cl) { fix_brick_to_highest (o, next_o); } curr_cl = (size_t) next_o / brick_size; } next_b = min (align_lower_brick (next_o) + brick_size, start+1); } size_t bo = brick_of (o); //dprintf (3, ("Looked at %Id objects, fixing brick [%Ix-[%Ix", dprintf (3, ("%Id o, [%Ix-[%Ix", n_o, bo, brick)); if (bo < brick) { set_brick (bo, (o - brick_address(bo))); size_t b = 1 + bo; int x = -1; while (b < brick) { set_brick (b,x--); b++; } } return o; } #ifdef CARD_BUNDLE // Find the first non-zero card word between cardw and cardw_end. // The index of the word we find is returned in cardw. BOOL gc_heap::find_card_dword (size_t& cardw, size_t cardw_end) { dprintf (3, ("gc: %d, find_card_dword cardw: %Ix, cardw_end: %Ix", dd_collection_count (dynamic_data_of (0)), cardw, cardw_end)); if (card_bundles_enabled()) { size_t cardb = cardw_card_bundle (cardw); size_t end_cardb = cardw_card_bundle (align_cardw_on_bundle (cardw_end)); while (1) { // Find a non-zero bundle while ((cardb < end_cardb) && (card_bundle_set_p (cardb) == 0)) { cardb++; } if (cardb == end_cardb) return FALSE; uint32_t* card_word = &card_table[max(card_bundle_cardw (cardb),cardw)]; uint32_t* card_word_end = &card_table[min(card_bundle_cardw (cardb+1),cardw_end)]; while ((card_word < card_word_end) && !(*card_word)) { card_word++; } if (card_word != card_word_end) { cardw = (card_word - &card_table[0]); return TRUE; } else if ((cardw <= card_bundle_cardw (cardb)) && (card_word == &card_table [card_bundle_cardw (cardb+1)])) { // a whole bundle was explored and is empty dprintf (3, ("gc: %d, find_card_dword clear bundle: %Ix cardw:[%Ix,%Ix[", dd_collection_count (dynamic_data_of (0)), cardb, card_bundle_cardw (cardb), card_bundle_cardw (cardb+1))); card_bundle_clear (cardb); } cardb++; } } else { uint32_t* card_word = &card_table[cardw]; uint32_t* card_word_end = &card_table [cardw_end]; while (card_word < card_word_end) { if ((*card_word) != 0) { cardw = (card_word - &card_table [0]); return TRUE; } card_word++; } return FALSE; } } #endif //CARD_BUNDLE // Find cards that are set between two points in a card table. // Parameters // card_table : The card table. // card : [in/out] As input, the card to start searching from. // As output, the first card that's set. // card_word_end : The card word at which to stop looking. // end_card : [out] The last card which is set. 
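//
// A minimal sketch of the caller pattern (simplified from mark_through_cards_for_segments
// below; not the exact control flow there) - walking every run of set cards in [beg, end):
//
//     size_t card = card_of (beg);
//     size_t card_word_end = card_of (align_on_card_word (end)) / card_word_width;
//     size_t end_card = 0;
//     while (find_card (card_table, card, card_word_end, end_card))
//     {
//         // cards [card, end_card[ are set; they cover addresses starting at card_address (card)
//         process_run (card_address (card), card_address (end_card));   // hypothetical helper
//         card = end_card;
//     }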
BOOL gc_heap::find_card(uint32_t* card_table, size_t& card, size_t card_word_end, size_t& end_card) { uint32_t* last_card_word; uint32_t card_word_value; uint32_t bit_position; if (card_word (card) >= card_word_end) return FALSE; // Find the first card which is set last_card_word = &card_table [card_word (card)]; bit_position = card_bit (card); card_word_value = (*last_card_word) >> bit_position; if (!card_word_value) { bit_position = 0; #ifdef CARD_BUNDLE // Using the card bundle, go through the remaining card words between here and // card_word_end until we find one that is non-zero. size_t lcw = card_word(card) + 1; if (gc_heap::find_card_dword (lcw, card_word_end) == FALSE) { return FALSE; } else { last_card_word = &card_table [lcw]; card_word_value = *last_card_word; } #else //CARD_BUNDLE // Go through the remaining card words between here and card_word_end until we find // one that is non-zero. do { ++last_card_word; } while ((last_card_word < &card_table [card_word_end]) && !(*last_card_word)); if (last_card_word < &card_table [card_word_end]) { card_word_value = *last_card_word; } else { // We failed to find any non-zero card words before we got to card_word_end return FALSE; } #endif //CARD_BUNDLE } // Look for the lowest bit set if (card_word_value) { while (!(card_word_value & 1)) { bit_position++; card_word_value = card_word_value / 2; } } // card is the card word index * card size + the bit index within the card card = (last_card_word - &card_table[0]) * card_word_width + bit_position; do { // Keep going until we get to an un-set card. bit_position++; card_word_value = card_word_value / 2; // If we reach the end of the card word and haven't hit a 0 yet, start going // card word by card word until we get to one that's not fully set (0xFFFF...) // or we reach card_word_end. if ((bit_position == card_word_width) && (last_card_word < &card_table [card_word_end-1])) { do { card_word_value = *(++last_card_word); } while ((last_card_word < &card_table [card_word_end-1]) && (card_word_value == ~0u /* (1 << card_word_width)-1 */)); bit_position = 0; } } while (card_word_value & 1); end_card = (last_card_word - &card_table [0])* card_word_width + bit_position; //dprintf (3, ("find_card: [%Ix, %Ix[ set", card, end_card)); dprintf (3, ("fc: [%Ix, %Ix[", card, end_card)); return TRUE; } //because of heap expansion, computing end is complicated. uint8_t* compute_next_end (heap_segment* seg, uint8_t* low) { if ((low >= heap_segment_mem (seg)) && (low < heap_segment_allocated (seg))) return low; else return heap_segment_allocated (seg); } #ifndef USE_REGIONS uint8_t* gc_heap::compute_next_boundary (int gen_number, BOOL relocating) { //when relocating, the fault line is the plan start of the younger //generation because the generation is promoted. if (relocating && (gen_number == (settings.condemned_generation + 1))) { generation* gen = generation_of (gen_number - 1); uint8_t* gen_alloc = generation_plan_allocation_start (gen); assert (gen_alloc); return gen_alloc; } else { assert (gen_number > settings.condemned_generation); return generation_allocation_start (generation_of (gen_number - 1 )); } } #endif //!USE_REGIONS // For regions - // n_gen means it's pointing into the condemned regions so it's incremented // if the child object's region is <= condemned_gen. // cg_pointers_found means it's pointing into a lower generation so it's incremented // if the child object's region is < current_gen. 
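// For example, during a gen1 GC (condemned_gen == 1) while scanning a gen2 region
// (current_gen == 2): a child living in a gen0 or gen1 region bumps both n_gen and
// cg_pointers_found, a child in another gen2 region bumps neither, and in the relocate
// pass a child whose region is planned to end up below gen2 still counts as a
// cross-generation pointer, so the covering card is kept set.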
inline void gc_heap::mark_through_cards_helper (uint8_t** poo, size_t& n_gen, size_t& cg_pointers_found, card_fn fn, uint8_t* nhigh, uint8_t* next_boundary, int condemned_gen, // generation of the parent object int current_gen CARD_MARKING_STEALING_ARG(gc_heap* hpt)) { #if defined(FEATURE_CARD_MARKING_STEALING) && defined(MULTIPLE_HEAPS) int thread = hpt->heap_number; #else THREAD_FROM_HEAP; #ifdef MULTIPLE_HEAPS gc_heap* hpt = this; #endif //MULTIPLE_HEAPS #endif //FEATURE_CARD_MARKING_STEALING && MULTIPLE_HEAPS #ifdef USE_REGIONS assert (nhigh == 0); assert (next_boundary == 0); uint8_t* child_object = *poo; if (!is_in_heap_range (child_object)) return; int child_object_gen = get_region_gen_num (child_object); int saved_child_object_gen = child_object_gen; uint8_t* saved_child_object = child_object; if (child_object_gen <= condemned_gen) { n_gen++; call_fn(hpt,fn) (poo THREAD_NUMBER_ARG); } if (fn == &gc_heap::relocate_address) { child_object_gen = get_region_plan_gen_num (*poo); } if (child_object_gen < current_gen) { cg_pointers_found++; dprintf (4, ("cg pointer %Ix found, %Id so far", (size_t)*poo, cg_pointers_found )); } #else //USE_REGIONS assert (condemned_gen == -1); if ((gc_low <= *poo) && (gc_high > *poo)) { n_gen++; call_fn(hpt,fn) (poo THREAD_NUMBER_ARG); } #ifdef MULTIPLE_HEAPS else if (*poo) { gc_heap* hp = heap_of_gc (*poo); if (hp != this) { if ((hp->gc_low <= *poo) && (hp->gc_high > *poo)) { n_gen++; call_fn(hpt,fn) (poo THREAD_NUMBER_ARG); } if ((fn == &gc_heap::relocate_address) || ((hp->ephemeral_low <= *poo) && (hp->ephemeral_high > *poo))) { cg_pointers_found++; } } } #endif //MULTIPLE_HEAPS if ((next_boundary <= *poo) && (nhigh > *poo)) { cg_pointers_found ++; dprintf (4, ("cg pointer %Ix found, %Id so far", (size_t)*poo, cg_pointers_found )); } #endif //USE_REGIONS } BOOL gc_heap::card_transition (uint8_t* po, uint8_t* end, size_t card_word_end, size_t& cg_pointers_found, size_t& n_eph, size_t& n_card_set, size_t& card, size_t& end_card, BOOL& foundp, uint8_t*& start_address, uint8_t*& limit, size_t& n_cards_cleared CARD_MARKING_STEALING_ARGS(card_marking_enumerator& card_mark_enumerator, heap_segment* seg, size_t &card_word_end_out)) { dprintf (3, ("pointer %Ix past card %Ix, cg %Id", (size_t)po, (size_t)card, cg_pointers_found)); BOOL passed_end_card_p = FALSE; foundp = FALSE; if (cg_pointers_found == 0) { //dprintf(3,(" Clearing cards [%Ix, %Ix[ ", dprintf(3,(" CC [%Ix, %Ix[ ", (size_t)card_address(card), (size_t)po)); clear_cards (card, card_of(po)); n_card_set -= (card_of (po) - card); n_cards_cleared += (card_of (po) - card); } n_eph +=cg_pointers_found; cg_pointers_found = 0; card = card_of (po); if (card >= end_card) { passed_end_card_p = TRUE; dprintf (3, ("card %Ix exceeding end_card %Ix", (size_t)card, (size_t)end_card)); foundp = find_card (card_table, card, card_word_end, end_card); if (foundp) { n_card_set+= end_card - card; start_address = card_address (card); dprintf (3, ("NewC: %Ix, start: %Ix, end: %Ix", (size_t)card, (size_t)start_address, (size_t)card_address (end_card))); } limit = min (end, card_address (end_card)); #ifdef FEATURE_CARD_MARKING_STEALING // the card bit @ end_card should not be set // if end_card is still shy of the limit set by card_word_end assert(!((card_word(end_card) < card_word_end) && card_set_p(end_card))); if (!foundp) { card_word_end_out = 0; foundp = find_next_chunk(card_mark_enumerator, seg, n_card_set, start_address, limit, card, end_card, card_word_end_out); } #else // the card bit @ end_card should not be set - // 
find_card is supposed to terminate only when it finds a 0 bit // or the end of the segment assert (!((limit < end) && card_set_p (end_card))); #endif } return passed_end_card_p; } #ifdef FEATURE_CARD_MARKING_STEALING bool card_marking_enumerator::move_next(heap_segment* seg, uint8_t*& low, uint8_t*& high) { if (segment == nullptr) return false; uint32_t chunk_index = old_chunk_index; old_chunk_index = INVALID_CHUNK_INDEX; if (chunk_index == INVALID_CHUNK_INDEX) chunk_index = Interlocked::Increment((volatile int32_t *)chunk_index_counter); while (true) { uint32_t chunk_index_within_seg = chunk_index - segment_start_chunk_index; uint8_t* start = heap_segment_mem(segment); uint8_t* end = compute_next_end(segment, gc_low); uint8_t* aligned_start = (uint8_t*)((size_t)start & ~(CARD_MARKING_STEALING_GRANULARITY - 1)); size_t seg_size = end - aligned_start; uint32_t chunk_count_within_seg = (uint32_t)((seg_size + (CARD_MARKING_STEALING_GRANULARITY - 1)) / CARD_MARKING_STEALING_GRANULARITY); if (chunk_index_within_seg < chunk_count_within_seg) { if (seg == segment) { low = (chunk_index_within_seg == 0) ? start : (aligned_start + (size_t)chunk_index_within_seg * CARD_MARKING_STEALING_GRANULARITY); high = (chunk_index_within_seg + 1 == chunk_count_within_seg) ? end : (aligned_start + (size_t)(chunk_index_within_seg + 1) * CARD_MARKING_STEALING_GRANULARITY); chunk_high = high; dprintf (3, ("cme:mn ci: %u, low: %Ix, high: %Ix", chunk_index, low, high)); return true; } else { // we found the correct segment, but it's not the segment our caller is in // our caller should still be in one of the previous segments #ifdef _DEBUG for (heap_segment* cur_seg = seg; cur_seg != segment; cur_seg = heap_segment_next_in_range(cur_seg)) { assert(cur_seg); } #endif //_DEBUG // keep the chunk index for later old_chunk_index = chunk_index; dprintf (3, ("cme:mn oci: %u, seg mismatch seg: %Ix, segment: %Ix", old_chunk_index, heap_segment_mem (segment), heap_segment_mem (seg))); return false; } } segment = heap_segment_next_in_range(segment); segment_start_chunk_index += chunk_count_within_seg; if (segment == nullptr) { // keep the chunk index for later old_chunk_index = chunk_index; dprintf (3, ("cme:mn oci: %u no more segments", old_chunk_index)); return false; } } } bool gc_heap::find_next_chunk(card_marking_enumerator& card_mark_enumerator, heap_segment* seg, size_t& n_card_set, uint8_t*& start_address, uint8_t*& limit, size_t& card, size_t& end_card, size_t& card_word_end) { while (true) { if (card_word_end != 0 && find_card(card_table, card, card_word_end, end_card)) { assert(end_card <= card_word_end * card_word_width); n_card_set += end_card - card; start_address = card_address(card); dprintf(3, ("NewC: %Ix, start: %Ix, end: %Ix", (size_t)card, (size_t)start_address, (size_t)card_address(end_card))); limit = min(card_mark_enumerator.get_chunk_high(), card_address(end_card)); dprintf (3, ("New run of cards on heap %d: [%Ix,%Ix[", heap_number, (size_t)start_address, (size_t)limit)); return true; } // we have exhausted this chunk, get the next one uint8_t* chunk_low = nullptr; uint8_t* chunk_high = nullptr; if (!card_mark_enumerator.move_next(seg, chunk_low, chunk_high)) { dprintf (3, ("No more chunks on heap %d\n", heap_number)); return false; } card = max(card, card_of(chunk_low)); card_word_end = (card_of(align_on_card_word(chunk_high)) / card_word_width); dprintf (3, ("Moved to next chunk on heap %d: [%Ix,%Ix[", heap_number, (size_t)chunk_low, (size_t)chunk_high)); } } #endif // FEATURE_CARD_MARKING_STEALING void 
gc_heap::mark_through_cards_for_segments (card_fn fn, BOOL relocating CARD_MARKING_STEALING_ARG(gc_heap* hpt)) { #ifdef BACKGROUND_GC #ifdef USE_REGIONS dprintf (3, ("current_sweep_pos is %Ix", current_sweep_pos)); #else dprintf (3, ("current_sweep_pos is %Ix, saved_sweep_ephemeral_seg is %Ix(%Ix)", current_sweep_pos, saved_sweep_ephemeral_seg, saved_sweep_ephemeral_start)); #endif //USE_REGIONS for (int i = get_start_generation_index(); i < max_generation; i++) { heap_segment* soh_seg = heap_segment_rw (generation_start_segment (generation_of (i))); PREFIX_ASSUME(soh_seg != NULL); while (soh_seg) { dprintf (3, ("seg %Ix, bgc_alloc: %Ix, alloc: %Ix", soh_seg, heap_segment_background_allocated (soh_seg), heap_segment_allocated (soh_seg))); soh_seg = heap_segment_next_rw (soh_seg); } } #endif //BACKGROUND_GC size_t end_card = 0; generation* oldest_gen = generation_of (max_generation); int curr_gen_number = max_generation; // Note - condemned_gen is only needed for regions and the other 2 are // only for if USE_REGIONS is not defined, but I need to pass them to a // function inside the macro below so just assert they are the unused values. #ifdef USE_REGIONS uint8_t* low = 0; uint8_t* gen_boundary = 0; uint8_t* next_boundary = 0; int condemned_gen = settings.condemned_generation; uint8_t* nhigh = 0; #else uint8_t* low = gc_low; uint8_t* high = gc_high; uint8_t* gen_boundary = generation_allocation_start(generation_of(curr_gen_number - 1)); uint8_t* next_boundary = compute_next_boundary(curr_gen_number, relocating); int condemned_gen = -1; uint8_t* nhigh = (relocating ? heap_segment_plan_allocated (ephemeral_heap_segment) : high); #endif //USE_REGIONS heap_segment* seg = heap_segment_rw (generation_start_segment (oldest_gen)); PREFIX_ASSUME(seg != NULL); uint8_t* beg = get_soh_start_object (seg, oldest_gen); uint8_t* end = compute_next_end (seg, low); uint8_t* last_object = beg; size_t cg_pointers_found = 0; size_t card_word_end = (card_of (align_on_card_word (end)) / card_word_width); size_t n_eph = 0; size_t n_gen = 0; size_t n_card_set = 0; BOOL foundp = FALSE; uint8_t* start_address = 0; uint8_t* limit = 0; size_t card = card_of (beg); #ifdef BACKGROUND_GC BOOL consider_bgc_mark_p = FALSE; BOOL check_current_sweep_p = FALSE; BOOL check_saved_sweep_p = FALSE; should_check_bgc_mark (seg, &consider_bgc_mark_p, &check_current_sweep_p, &check_saved_sweep_p); #endif //BACKGROUND_GC dprintf(3, ("CMs: %Ix->%Ix", (size_t)beg, (size_t)end)); size_t total_cards_cleared = 0; #ifdef FEATURE_CARD_MARKING_STEALING card_marking_enumerator card_mark_enumerator (seg, low, (VOLATILE(uint32_t)*)&card_mark_chunk_index_soh); card_word_end = 0; #endif // FEATURE_CARD_MARKING_STEALING while (1) { if (card_of(last_object) > card) { dprintf (3, ("Found %Id cg pointers", cg_pointers_found)); if (cg_pointers_found == 0) { uint8_t* last_object_processed = last_object; #ifdef FEATURE_CARD_MARKING_STEALING last_object_processed = min(limit, last_object); #endif // FEATURE_CARD_MARKING_STEALING dprintf (3, (" Clearing cards [%Ix, %Ix[ ", (size_t)card_address(card), (size_t)last_object_processed)); clear_cards(card, card_of(last_object_processed)); n_card_set -= (card_of(last_object_processed) - card); total_cards_cleared += (card_of(last_object_processed) - card); } n_eph += cg_pointers_found; cg_pointers_found = 0; card = card_of (last_object); } if (card >= end_card) { #ifdef FEATURE_CARD_MARKING_STEALING // find another chunk with some cards set foundp = find_next_chunk(card_mark_enumerator, seg, n_card_set, 
start_address, limit, card, end_card, card_word_end); #else // FEATURE_CARD_MARKING_STEALING foundp = find_card(card_table, card, card_word_end, end_card); if (foundp) { n_card_set += end_card - card; start_address = max (beg, card_address (card)); } limit = min (end, card_address (end_card)); #endif // FEATURE_CARD_MARKING_STEALING } if (!foundp || (last_object >= end) || (card_address (card) >= end)) { if (foundp && (cg_pointers_found == 0)) { #ifndef USE_REGIONS // in the segment case, need to recompute end_card so we don't clear cards // for the next generation end_card = card_of (end); #endif dprintf(3,(" Clearing cards [%Ix, %Ix[ ", (size_t)card_address(card), (size_t)card_address(end_card))); clear_cards (card, end_card); n_card_set -= (end_card - card); total_cards_cleared += (end_card - card); } n_eph += cg_pointers_found; cg_pointers_found = 0; #ifdef FEATURE_CARD_MARKING_STEALING // we have decided to move to the next segment - make sure we exhaust the chunk enumerator for this segment card_mark_enumerator.exhaust_segment(seg); #endif // FEATURE_CARD_MARKING_STEALING seg = heap_segment_next_in_range (seg); #ifdef USE_REGIONS if (!seg) { curr_gen_number--; if (curr_gen_number > condemned_gen) { // Switch to regions for this generation. seg = generation_start_segment (generation_of (curr_gen_number)); #ifdef FEATURE_CARD_MARKING_STEALING card_mark_enumerator.switch_to_segment(seg); #endif // FEATURE_CARD_MARKING_STEALING dprintf (REGIONS_LOG, ("h%d switching to gen%d start seg %Ix", heap_number, curr_gen_number, (size_t)seg)); } } #endif //USE_REGIONS if (seg) { #ifdef BACKGROUND_GC should_check_bgc_mark (seg, &consider_bgc_mark_p, &check_current_sweep_p, &check_saved_sweep_p); #endif //BACKGROUND_GC beg = heap_segment_mem (seg); #ifdef USE_REGIONS end = heap_segment_allocated (seg); #else end = compute_next_end (seg, low); #endif //USE_REGIONS #ifdef FEATURE_CARD_MARKING_STEALING card_word_end = 0; #else // FEATURE_CARD_MARKING_STEALING card_word_end = card_of (align_on_card_word (end)) / card_word_width; #endif // FEATURE_CARD_MARKING_STEALING card = card_of (beg); last_object = beg; end_card = 0; continue; } else { break; } } assert (card_set_p (card)); { uint8_t* o = last_object; o = find_first_object (start_address, last_object); // Never visit an object twice. 
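            // find_first_object walks the brick table back from start_address to the
            // closest preceding plug and then scans forward, so o is the last object that
            // starts at or before start_address; as a side effect the forward scan repairs
            // stale brick entries via fix_brick_to_highest.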
assert (o >= last_object); #ifndef USE_REGIONS //dprintf(3,("Considering card %Ix start object: %Ix, %Ix[ boundary: %Ix", dprintf(3, ("c: %Ix, o: %Ix, l: %Ix[ boundary: %Ix", card, (size_t)o, (size_t)limit, (size_t)gen_boundary)); #endif //USE_REGIONS while (o < limit) { assert (Align (size (o)) >= Align (min_obj_size)); size_t s = size (o); // next_o is the next object in the heap walk uint8_t* next_o = o + Align (s); // while cont_o is the object we should continue with at the end_object label uint8_t* cont_o = next_o; Prefetch (next_o); #ifndef USE_REGIONS if ((o >= gen_boundary) && (seg == ephemeral_heap_segment)) { dprintf (3, ("switching gen boundary %Ix", (size_t)gen_boundary)); curr_gen_number--; assert ((curr_gen_number > 0)); gen_boundary = generation_allocation_start (generation_of (curr_gen_number - 1)); next_boundary = (compute_next_boundary (curr_gen_number, relocating)); } #endif //!USE_REGIONS dprintf (4, ("|%Ix|", (size_t)o)); if (next_o < start_address) { goto end_object; } #ifdef BACKGROUND_GC if (!fgc_should_consider_object (o, seg, consider_bgc_mark_p, check_current_sweep_p, check_saved_sweep_p)) { goto end_object; } #endif //BACKGROUND_GC #ifdef COLLECTIBLE_CLASS if (is_collectible(o)) { BOOL passed_end_card_p = FALSE; if (card_of (o) > card) { passed_end_card_p = card_transition (o, end, card_word_end, cg_pointers_found, n_eph, n_card_set, card, end_card, foundp, start_address, limit, total_cards_cleared CARD_MARKING_STEALING_ARGS(card_mark_enumerator, seg, card_word_end)); } if ((!passed_end_card_p || foundp) && (card_of (o) == card)) { // card is valid and it covers the head of the object if (fn == &gc_heap::relocate_address) { cg_pointers_found++; } else { uint8_t* class_obj = get_class_object (o); mark_through_cards_helper (&class_obj, n_gen, cg_pointers_found, fn, nhigh, next_boundary, condemned_gen, curr_gen_number CARD_MARKING_STEALING_ARG(hpt)); } } if (passed_end_card_p) { if (foundp && (card_address (card) < next_o)) { goto go_through_refs; } else if (foundp && (start_address < limit)) { cont_o = find_first_object (start_address, o); goto end_object; } else goto end_limit; } } go_through_refs: #endif //COLLECTIBLE_CLASS if (contain_pointers (o)) { dprintf(3,("Going through %Ix start_address: %Ix", (size_t)o, (size_t)start_address)); { dprintf (4, ("normal object path")); go_through_object (method_table(o), o, s, poo, start_address, use_start, (o + s), { dprintf (4, ("<%Ix>:%Ix", (size_t)poo, (size_t)*poo)); if (card_of ((uint8_t*)poo) > card) { BOOL passed_end_card_p = card_transition ((uint8_t*)poo, end, card_word_end, cg_pointers_found, n_eph, n_card_set, card, end_card, foundp, start_address, limit, total_cards_cleared CARD_MARKING_STEALING_ARGS(card_mark_enumerator, seg, card_word_end)); if (passed_end_card_p) { if (foundp && (card_address (card) < next_o)) { //new_start(); { if (ppstop <= (uint8_t**)start_address) {break;} else if (poo < (uint8_t**)start_address) {poo = (uint8_t**)start_address;} } } else if (foundp && (start_address < limit)) { cont_o = find_first_object (start_address, o); goto end_object; } else goto end_limit; } } mark_through_cards_helper (poo, n_gen, cg_pointers_found, fn, nhigh, next_boundary, condemned_gen, curr_gen_number CARD_MARKING_STEALING_ARG(hpt)); } ); } } end_object: if (((size_t)next_o / brick_size) != ((size_t) o / brick_size)) { if (brick_table [brick_of (o)] <0) fix_brick_to_highest (o, next_o); } o = cont_o; } end_limit: last_object = o; } } // compute the efficiency ratio of the card table if (!relocating) { 
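        // For example (non-stealing build): if this pass saw n_eph == 200 cross-generation
        // pointers ("cross" in the logging below) but only n_gen == 50 of them pointed into
        // the condemned generations ("useful"), generation_skip_ratio becomes 25 - assuming
        // 200 exceeds MIN_SOH_CROSS_GEN_REFS; otherwise it stays at 100.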
#ifdef FEATURE_CARD_MARKING_STEALING Interlocked::ExchangeAddPtr(&n_eph_soh, n_eph); Interlocked::ExchangeAddPtr(&n_gen_soh, n_gen); dprintf (3, ("h%d marking h%d Msoh: cross: %Id, useful: %Id, cards set: %Id, cards cleared: %Id, ratio: %d", hpt->heap_number, heap_number, n_eph, n_gen, n_card_set, total_cards_cleared, (n_eph ? (int)(((float)n_gen / (float)n_eph) * 100) : 0))); dprintf (3, ("h%d marking h%d Msoh: total cross %Id, useful: %Id, running ratio: %d", hpt->heap_number, heap_number, (size_t)n_eph_soh, (size_t)n_gen_soh, (n_eph_soh ? (int)(((float)n_gen_soh / (float)n_eph_soh) * 100) : 0))); #else generation_skip_ratio = ((n_eph > MIN_SOH_CROSS_GEN_REFS) ? (int)(((float)n_gen / (float)n_eph) * 100) : 100); dprintf (3, ("marking h%d Msoh: cross: %Id, useful: %Id, cards set: %Id, cards cleared: %Id, ratio: %d", heap_number, n_eph, n_gen, n_card_set, total_cards_cleared, generation_skip_ratio)); #endif //FEATURE_CARD_MARKING_STEALING } else { dprintf (3, ("R: Msoh: cross: %Id, useful: %Id, cards set: %Id, cards cleared: %Id, ratio: %d", n_gen, n_eph, n_card_set, total_cards_cleared, generation_skip_ratio)); } } #ifndef USE_REGIONS #ifdef SEG_REUSE_STATS size_t gc_heap::dump_buckets (size_t* ordered_indices, int count, size_t* total_size) { size_t total_items = 0; *total_size = 0; for (int i = 0; i < count; i++) { total_items += ordered_indices[i]; *total_size += ordered_indices[i] << (MIN_INDEX_POWER2 + i); dprintf (SEG_REUSE_LOG_0, ("[%d]%4d 2^%2d", heap_number, ordered_indices[i], (MIN_INDEX_POWER2 + i))); } dprintf (SEG_REUSE_LOG_0, ("[%d]Total %d items, total size is 0x%Ix", heap_number, total_items, *total_size)); return total_items; } #endif // SEG_REUSE_STATS void gc_heap::count_plug (size_t last_plug_size, uint8_t*& last_plug) { // detect pinned plugs if (!pinned_plug_que_empty_p() && (last_plug == pinned_plug (oldest_pin()))) { deque_pinned_plug(); update_oldest_pinned_plug(); dprintf (3, ("deque pin,now oldest pin is %Ix", pinned_plug (oldest_pin()))); } else { size_t plug_size = last_plug_size + Align(min_obj_size); BOOL is_padded = FALSE; #ifdef SHORT_PLUGS plug_size += Align (min_obj_size); is_padded = TRUE; #endif //SHORT_PLUGS #ifdef RESPECT_LARGE_ALIGNMENT plug_size += switch_alignment_size (is_padded); #endif //RESPECT_LARGE_ALIGNMENT total_ephemeral_plugs += plug_size; size_t plug_size_power2 = round_up_power2 (plug_size); ordered_plug_indices[relative_index_power2_plug (plug_size_power2)]++; dprintf (SEG_REUSE_LOG_1, ("[%d]count_plug: adding 0x%Ix - %Id (2^%d) to ordered plug array", heap_number, last_plug, plug_size, (relative_index_power2_plug (plug_size_power2) + MIN_INDEX_POWER2))); } } void gc_heap::count_plugs_in_brick (uint8_t* tree, uint8_t*& last_plug) { assert ((tree != NULL)); if (node_left_child (tree)) { count_plugs_in_brick (tree + node_left_child (tree), last_plug); } if (last_plug != 0) { uint8_t* plug = tree; size_t gap_size = node_gap_size (plug); uint8_t* gap = (plug - gap_size); uint8_t* last_plug_end = gap; size_t last_plug_size = (last_plug_end - last_plug); dprintf (3, ("tree: %Ix, last plug: %Ix, gap size: %Ix, gap: %Ix, last plug size: %Ix", tree, last_plug, gap_size, gap, last_plug_size)); if (tree == oldest_pinned_plug) { dprintf (3, ("tree %Ix is pinned, last plug is %Ix, size is %Ix", tree, last_plug, last_plug_size)); mark* m = oldest_pin(); if (m->has_pre_plug_info()) { last_plug_size += sizeof (gap_reloc_pair); dprintf (3, ("pin %Ix has pre plug, adjusting plug size to %Ix", tree, last_plug_size)); } } // Can't assert here - if it's a 
pinned plug it can be less. //assert (last_plug_size >= Align (min_obj_size)); count_plug (last_plug_size, last_plug); } last_plug = tree; if (node_right_child (tree)) { count_plugs_in_brick (tree + node_right_child (tree), last_plug); } } void gc_heap::build_ordered_plug_indices () { memset (ordered_plug_indices, 0, sizeof(ordered_plug_indices)); memset (saved_ordered_plug_indices, 0, sizeof(saved_ordered_plug_indices)); uint8_t* start_address = generation_limit (max_generation); uint8_t* end_address = heap_segment_allocated (ephemeral_heap_segment); size_t current_brick = brick_of (start_address); size_t end_brick = brick_of (end_address - 1); uint8_t* last_plug = 0; //Look for the right pinned plug to start from. reset_pinned_queue_bos(); while (!pinned_plug_que_empty_p()) { mark* m = oldest_pin(); if ((m->first >= start_address) && (m->first < end_address)) { dprintf (3, ("found a pin %Ix between %Ix and %Ix", m->first, start_address, end_address)); break; } else deque_pinned_plug(); } update_oldest_pinned_plug(); while (current_brick <= end_brick) { int brick_entry = brick_table [ current_brick ]; if (brick_entry >= 0) { count_plugs_in_brick (brick_address (current_brick) + brick_entry -1, last_plug); } current_brick++; } if (last_plug !=0) { count_plug (end_address - last_plug, last_plug); } // we need to make sure that after fitting all the existing plugs, we // have big enough free space left to guarantee that the next allocation // will succeed. size_t extra_size = END_SPACE_AFTER_GC_FL; total_ephemeral_plugs += extra_size; dprintf (SEG_REUSE_LOG_0, ("Making sure we can fit a large object after fitting all plugs")); ordered_plug_indices[relative_index_power2_plug (round_up_power2 (extra_size))]++; memcpy (saved_ordered_plug_indices, ordered_plug_indices, sizeof(ordered_plug_indices)); #ifdef SEG_REUSE_STATS dprintf (SEG_REUSE_LOG_0, ("Plugs:")); size_t total_plug_power2 = 0; dump_buckets (ordered_plug_indices, MAX_NUM_BUCKETS, &total_plug_power2); dprintf (SEG_REUSE_LOG_0, ("plugs: 0x%Ix (rounded up to 0x%Ix (%d%%))", total_ephemeral_plugs, total_plug_power2, (total_ephemeral_plugs ? (total_plug_power2 * 100 / total_ephemeral_plugs) : 0))); dprintf (SEG_REUSE_LOG_0, ("-------------------")); #endif // SEG_REUSE_STATS } void gc_heap::init_ordered_free_space_indices () { memset (ordered_free_space_indices, 0, sizeof(ordered_free_space_indices)); memset (saved_ordered_free_space_indices, 0, sizeof(saved_ordered_free_space_indices)); } void gc_heap::trim_free_spaces_indices () { trimmed_free_space_index = -1; size_t max_count = max_free_space_items - 1; size_t count = 0; int i = 0; for (i = (MAX_NUM_BUCKETS - 1); i >= 0; i--) { count += ordered_free_space_indices[i]; if (count >= max_count) { break; } } ptrdiff_t extra_free_space_items = count - max_count; if (extra_free_space_items > 0) { ordered_free_space_indices[i] -= extra_free_space_items; free_space_items = max_count; trimmed_free_space_index = i; } else { free_space_items = count; } if (i == -1) { i = 0; } free_space_buckets = MAX_NUM_BUCKETS - i; for (--i; i >= 0; i--) { ordered_free_space_indices[i] = 0; } memcpy (saved_ordered_free_space_indices, ordered_free_space_indices, sizeof(ordered_free_space_indices)); } // We fit as many plugs as we can and update the number of plugs left and the number // of free spaces left. 
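// Illustrative example of the redistribution below: with small_index == 0 and
// big_index == 2, 3 free spaces of the big size provide big_to_small == 3 << 2 == 12 slots
// of the small size. Fitting 5 small blocks leaves extra_small_spaces == 7 == 0b111, which
// the loop hands back as one space in each of buckets 0, 1 and 2 - exactly
// 1 + 2 + 4 == 7 small-sized units of free space.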
BOOL gc_heap::can_fit_in_spaces_p (size_t* ordered_blocks, int small_index, size_t* ordered_spaces, int big_index) { assert (small_index <= big_index); assert (big_index < MAX_NUM_BUCKETS); size_t small_blocks = ordered_blocks[small_index]; if (small_blocks == 0) { return TRUE; } size_t big_spaces = ordered_spaces[big_index]; if (big_spaces == 0) { return FALSE; } dprintf (SEG_REUSE_LOG_1, ("[%d]Fitting %Id 2^%d plugs into %Id 2^%d free spaces", heap_number, small_blocks, (small_index + MIN_INDEX_POWER2), big_spaces, (big_index + MIN_INDEX_POWER2))); size_t big_to_small = big_spaces << (big_index - small_index); ptrdiff_t extra_small_spaces = big_to_small - small_blocks; dprintf (SEG_REUSE_LOG_1, ("[%d]%d 2^%d spaces can fit %d 2^%d blocks", heap_number, big_spaces, (big_index + MIN_INDEX_POWER2), big_to_small, (small_index + MIN_INDEX_POWER2))); BOOL can_fit = (extra_small_spaces >= 0); if (can_fit) { dprintf (SEG_REUSE_LOG_1, ("[%d]Can fit with %d 2^%d extras blocks", heap_number, extra_small_spaces, (small_index + MIN_INDEX_POWER2))); } int i = 0; dprintf (SEG_REUSE_LOG_1, ("[%d]Setting # of 2^%d spaces to 0", heap_number, (big_index + MIN_INDEX_POWER2))); ordered_spaces[big_index] = 0; if (extra_small_spaces > 0) { dprintf (SEG_REUSE_LOG_1, ("[%d]Setting # of 2^%d blocks to 0", heap_number, (small_index + MIN_INDEX_POWER2))); ordered_blocks[small_index] = 0; for (i = small_index; i < big_index; i++) { if (extra_small_spaces & 1) { dprintf (SEG_REUSE_LOG_1, ("[%d]Increasing # of 2^%d spaces from %d to %d", heap_number, (i + MIN_INDEX_POWER2), ordered_spaces[i], (ordered_spaces[i] + 1))); ordered_spaces[i] += 1; } extra_small_spaces >>= 1; } dprintf (SEG_REUSE_LOG_1, ("[%d]Finally increasing # of 2^%d spaces from %d to %d", heap_number, (i + MIN_INDEX_POWER2), ordered_spaces[i], (ordered_spaces[i] + extra_small_spaces))); ordered_spaces[i] += extra_small_spaces; } else { dprintf (SEG_REUSE_LOG_1, ("[%d]Decreasing # of 2^%d blocks from %d to %d", heap_number, (small_index + MIN_INDEX_POWER2), ordered_blocks[small_index], (ordered_blocks[small_index] - big_to_small))); ordered_blocks[small_index] -= big_to_small; } #ifdef SEG_REUSE_STATS size_t temp; dprintf (SEG_REUSE_LOG_1, ("[%d]Plugs became:", heap_number)); dump_buckets (ordered_blocks, MAX_NUM_BUCKETS, &temp); dprintf (SEG_REUSE_LOG_1, ("[%d]Free spaces became:", heap_number)); dump_buckets (ordered_spaces, MAX_NUM_BUCKETS, &temp); #endif //SEG_REUSE_STATS return can_fit; } // space_index gets updated to the biggest available space index. 
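// can_fit_blocks_p below keeps trying the current biggest bucket and walks space_index
// down one bucket at a time until the blocks of this size fit or space_index drops below
// block_index; can_fit_all_blocks_p then reuses the shrinking space_index as it iterates
// block sizes from largest to smallest.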
BOOL gc_heap::can_fit_blocks_p (size_t* ordered_blocks, int block_index, size_t* ordered_spaces, int* space_index) { assert (*space_index >= block_index); while (!can_fit_in_spaces_p (ordered_blocks, block_index, ordered_spaces, *space_index)) { (*space_index)--; if (*space_index < block_index) { return FALSE; } } return TRUE; } BOOL gc_heap::can_fit_all_blocks_p (size_t* ordered_blocks, size_t* ordered_spaces, int count) { #ifdef FEATURE_STRUCTALIGN // BARTOKTODO (4841): reenable when can_fit_in_spaces_p takes alignment requirements into account return FALSE; #endif // FEATURE_STRUCTALIGN int space_index = count - 1; for (int block_index = (count - 1); block_index >= 0; block_index--) { if (!can_fit_blocks_p (ordered_blocks, block_index, ordered_spaces, &space_index)) { return FALSE; } } return TRUE; } void gc_heap::build_ordered_free_spaces (heap_segment* seg) { assert (bestfit_seg); //bestfit_seg->add_buckets (MAX_NUM_BUCKETS - free_space_buckets + MIN_INDEX_POWER2, // ordered_free_space_indices + (MAX_NUM_BUCKETS - free_space_buckets), // free_space_buckets, // free_space_items); bestfit_seg->add_buckets (MIN_INDEX_POWER2, ordered_free_space_indices, MAX_NUM_BUCKETS, free_space_items); assert (settings.condemned_generation == max_generation); uint8_t* first_address = heap_segment_mem (seg); uint8_t* end_address = heap_segment_reserved (seg); //look through the pinned plugs for relevant ones. //Look for the right pinned plug to start from. reset_pinned_queue_bos(); mark* m = 0; // See comment in can_expand_into_p why we need this size. size_t eph_gen_starts = eph_gen_starts_size + Align (min_obj_size); BOOL has_fit_gen_starts = FALSE; while (!pinned_plug_que_empty_p()) { m = oldest_pin(); if ((pinned_plug (m) >= first_address) && (pinned_plug (m) < end_address) && (pinned_len (m) >= eph_gen_starts)) { assert ((pinned_plug (m) - pinned_len (m)) == bestfit_first_pin); break; } else { deque_pinned_plug(); } } if (!pinned_plug_que_empty_p()) { bestfit_seg->add ((void*)m, TRUE, TRUE); deque_pinned_plug(); m = oldest_pin(); has_fit_gen_starts = TRUE; } while (!pinned_plug_que_empty_p() && ((pinned_plug (m) >= first_address) && (pinned_plug (m) < end_address))) { bestfit_seg->add ((void*)m, TRUE, FALSE); deque_pinned_plug(); m = oldest_pin(); } if (commit_end_of_seg) { if (!has_fit_gen_starts) { assert (bestfit_first_pin == heap_segment_plan_allocated (seg)); } bestfit_seg->add ((void*)seg, FALSE, (!has_fit_gen_starts)); } #ifdef _DEBUG bestfit_seg->check(); #endif //_DEBUG } BOOL gc_heap::try_best_fit (BOOL end_of_segment_p) { if (!end_of_segment_p) { trim_free_spaces_indices (); } BOOL can_bestfit = can_fit_all_blocks_p (ordered_plug_indices, ordered_free_space_indices, MAX_NUM_BUCKETS); return can_bestfit; } BOOL gc_heap::best_fit (size_t free_space, size_t largest_free_space, size_t additional_space, BOOL* use_additional_space) { dprintf (SEG_REUSE_LOG_0, ("gen%d: trying best fit mechanism", settings.condemned_generation)); assert (!additional_space || (additional_space && use_additional_space)); if (use_additional_space) { *use_additional_space = FALSE; } if (ordered_plug_indices_init == FALSE) { total_ephemeral_plugs = 0; build_ordered_plug_indices(); ordered_plug_indices_init = TRUE; } else { memcpy (ordered_plug_indices, saved_ordered_plug_indices, sizeof(ordered_plug_indices)); } if (total_ephemeral_plugs == END_SPACE_AFTER_GC_FL) { dprintf (SEG_REUSE_LOG_0, ("No ephemeral plugs to realloc, done")); size_t empty_eph = (END_SPACE_AFTER_GC_FL + (Align (min_obj_size)) * (max_generation + 
1)); BOOL can_fit_empty_eph = (largest_free_space >= empty_eph); if (!can_fit_empty_eph) { can_fit_empty_eph = (additional_space >= empty_eph); if (can_fit_empty_eph) { *use_additional_space = TRUE; } } return can_fit_empty_eph; } if ((total_ephemeral_plugs + approximate_new_allocation()) >= (free_space + additional_space)) { dprintf (SEG_REUSE_LOG_0, ("We won't have enough free space left in this segment after fitting, done")); return FALSE; } if ((free_space + additional_space) == 0) { dprintf (SEG_REUSE_LOG_0, ("No free space in this segment, done")); return FALSE; } #ifdef SEG_REUSE_STATS dprintf (SEG_REUSE_LOG_0, ("Free spaces:")); size_t total_free_space_power2 = 0; size_t total_free_space_items = dump_buckets (ordered_free_space_indices, MAX_NUM_BUCKETS, &total_free_space_power2); dprintf (SEG_REUSE_LOG_0, ("currently max free spaces is %Id", max_free_space_items)); dprintf (SEG_REUSE_LOG_0, ("Ephemeral plugs: 0x%Ix, free space: 0x%Ix (rounded down to 0x%Ix (%Id%%)), additional free_space: 0x%Ix", total_ephemeral_plugs, free_space, total_free_space_power2, (free_space ? (total_free_space_power2 * 100 / free_space) : 0), additional_space)); size_t saved_all_free_space_indices[MAX_NUM_BUCKETS]; memcpy (saved_all_free_space_indices, ordered_free_space_indices, sizeof(saved_all_free_space_indices)); #endif // SEG_REUSE_STATS if (total_ephemeral_plugs > (free_space + additional_space)) { return FALSE; } use_bestfit = try_best_fit(FALSE); if (!use_bestfit && additional_space) { int relative_free_space_index = relative_index_power2_free_space (round_down_power2 (additional_space)); if (relative_free_space_index != -1) { int relative_plug_index = 0; size_t plugs_to_fit = 0; for (relative_plug_index = (MAX_NUM_BUCKETS - 1); relative_plug_index >= 0; relative_plug_index--) { plugs_to_fit = ordered_plug_indices[relative_plug_index]; if (plugs_to_fit != 0) { break; } } if ((relative_plug_index > relative_free_space_index) || ((relative_plug_index == relative_free_space_index) && (plugs_to_fit > 1))) { #ifdef SEG_REUSE_STATS dprintf (SEG_REUSE_LOG_0, ("additional space is 2^%d but we stopped at %d 2^%d plug(s)", (relative_free_space_index + MIN_INDEX_POWER2), plugs_to_fit, (relative_plug_index + MIN_INDEX_POWER2))); #endif // SEG_REUSE_STATS goto adjust; } dprintf (SEG_REUSE_LOG_0, ("Adding end of segment (2^%d)", (relative_free_space_index + MIN_INDEX_POWER2))); ordered_free_space_indices[relative_free_space_index]++; use_bestfit = try_best_fit(TRUE); if (use_bestfit) { free_space_items++; // Since we might've trimmed away some of the free spaces we had, we should see // if we really need to use end of seg space - if it's the same or smaller than // the largest space we trimmed we can just add that one back instead of // using end of seg. if (relative_free_space_index > trimmed_free_space_index) { *use_additional_space = TRUE; } else { // If the addition space is <= than the last trimmed space, we // should just use that last trimmed space instead. saved_ordered_free_space_indices[trimmed_free_space_index]++; } } } } adjust: if (!use_bestfit) { dprintf (SEG_REUSE_LOG_0, ("couldn't fit...")); #ifdef SEG_REUSE_STATS size_t saved_max = max_free_space_items; BOOL temp_bestfit = FALSE; dprintf (SEG_REUSE_LOG_0, ("----Starting experiment process----")); dprintf (SEG_REUSE_LOG_0, ("----Couldn't fit with max free items %Id", max_free_space_items)); // TODO: need to take the end of segment into consideration. 
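            // The loop below is diagnostics only (SEG_REUSE_STATS): it keeps growing
            // max_free_space_items by half, restores the untrimmed free-space buckets and
            // retries the fit, just to log how many free spaces would have been enough;
            // max_free_space_items is put back to saved_max afterwards.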
while (max_free_space_items <= total_free_space_items) { max_free_space_items += max_free_space_items / 2; dprintf (SEG_REUSE_LOG_0, ("----Temporarily increasing max free spaces to %Id", max_free_space_items)); memcpy (ordered_free_space_indices, saved_all_free_space_indices, sizeof(ordered_free_space_indices)); if (try_best_fit(FALSE)) { temp_bestfit = TRUE; break; } } if (temp_bestfit) { dprintf (SEG_REUSE_LOG_0, ("----With %Id max free spaces we could fit", max_free_space_items)); } else { dprintf (SEG_REUSE_LOG_0, ("----Tried all free spaces and still couldn't fit, lost too much space")); } dprintf (SEG_REUSE_LOG_0, ("----Restoring max free spaces to %Id", saved_max)); max_free_space_items = saved_max; #endif // SEG_REUSE_STATS if (free_space_items) { max_free_space_items = min (MAX_NUM_FREE_SPACES, free_space_items * 2); max_free_space_items = max (max_free_space_items, MIN_NUM_FREE_SPACES); } else { max_free_space_items = MAX_NUM_FREE_SPACES; } } dprintf (SEG_REUSE_LOG_0, ("Adjusted number of max free spaces to %Id", max_free_space_items)); dprintf (SEG_REUSE_LOG_0, ("------End of best fitting process------\n")); return use_bestfit; } BOOL gc_heap::process_free_space (heap_segment* seg, size_t free_space, size_t min_free_size, size_t min_cont_size, size_t* total_free_space, size_t* largest_free_space) { *total_free_space += free_space; *largest_free_space = max (*largest_free_space, free_space); #ifdef SIMPLE_DPRINTF dprintf (SEG_REUSE_LOG_1, ("free space len: %Ix, total free space: %Ix, largest free space: %Ix", free_space, *total_free_space, *largest_free_space)); #endif //SIMPLE_DPRINTF if ((*total_free_space >= min_free_size) && (*largest_free_space >= min_cont_size)) { #ifdef SIMPLE_DPRINTF dprintf (SEG_REUSE_LOG_0, ("(gen%d)total free: %Ix(min: %Ix), largest free: %Ix(min: %Ix). Found segment %Ix to reuse without bestfit", settings.condemned_generation, *total_free_space, min_free_size, *largest_free_space, min_cont_size, (size_t)seg)); #else UNREFERENCED_PARAMETER(seg); #endif //SIMPLE_DPRINTF return TRUE; } int free_space_index = relative_index_power2_free_space (round_down_power2 (free_space)); if (free_space_index != -1) { ordered_free_space_indices[free_space_index]++; } return FALSE; } BOOL gc_heap::can_expand_into_p (heap_segment* seg, size_t min_free_size, size_t min_cont_size, allocator* gen_allocator) { min_cont_size += END_SPACE_AFTER_GC; use_bestfit = FALSE; commit_end_of_seg = FALSE; bestfit_first_pin = 0; uint8_t* first_address = heap_segment_mem (seg); uint8_t* end_address = heap_segment_reserved (seg); size_t end_extra_space = end_space_after_gc(); if ((heap_segment_reserved (seg) - end_extra_space) <= heap_segment_plan_allocated (seg)) { dprintf (SEG_REUSE_LOG_0, ("can_expand_into_p: can't use segment [%Ix %Ix, has less than %d bytes at the end", first_address, end_address, end_extra_space)); return FALSE; } end_address -= end_extra_space; dprintf (SEG_REUSE_LOG_0, ("can_expand_into_p(gen%d): min free: %Ix, min continuous: %Ix", settings.condemned_generation, min_free_size, min_cont_size)); size_t eph_gen_starts = eph_gen_starts_size; if (settings.condemned_generation == max_generation) { size_t free_space = 0; size_t largest_free_space = free_space; dprintf (SEG_REUSE_LOG_0, ("can_expand_into_p: gen2: testing segment [%Ix %Ix", first_address, end_address)); //Look through the pinned plugs for relevant ones and Look for the right pinned plug to start from. 
//We are going to allocate the generation starts in the 1st free space, //so start from the first free space that's big enough for gen starts and a min object size. // If we see a free space that is >= gen starts but < gen starts + min obj size we just don't use it - // we could use it by allocating the last generation start a bit bigger but // the complexity isn't worth the effort (those plugs are from gen2 // already anyway). reset_pinned_queue_bos(); mark* m = 0; BOOL has_fit_gen_starts = FALSE; init_ordered_free_space_indices (); while (!pinned_plug_que_empty_p()) { m = oldest_pin(); if ((pinned_plug (m) >= first_address) && (pinned_plug (m) < end_address) && (pinned_len (m) >= (eph_gen_starts + Align (min_obj_size)))) { break; } else { deque_pinned_plug(); } } if (!pinned_plug_que_empty_p()) { bestfit_first_pin = pinned_plug (m) - pinned_len (m); if (process_free_space (seg, pinned_len (m) - eph_gen_starts, min_free_size, min_cont_size, &free_space, &largest_free_space)) { return TRUE; } deque_pinned_plug(); m = oldest_pin(); has_fit_gen_starts = TRUE; } dprintf (3, ("first pin is %Ix", pinned_plug (m))); //tally up free space while (!pinned_plug_que_empty_p() && ((pinned_plug (m) >= first_address) && (pinned_plug (m) < end_address))) { dprintf (3, ("looking at pin %Ix", pinned_plug (m))); if (process_free_space (seg, pinned_len (m), min_free_size, min_cont_size, &free_space, &largest_free_space)) { return TRUE; } deque_pinned_plug(); m = oldest_pin(); } //try to find space at the end of the segment. size_t end_space = (end_address - heap_segment_plan_allocated (seg)); size_t additional_space = ((min_free_size > free_space) ? (min_free_size - free_space) : 0); dprintf (SEG_REUSE_LOG_0, ("end space: %Ix; additional: %Ix", end_space, additional_space)); if (end_space >= additional_space) { BOOL can_fit = TRUE; commit_end_of_seg = TRUE; if (largest_free_space < min_cont_size) { if (end_space >= min_cont_size) { additional_space = max (min_cont_size, additional_space); dprintf (SEG_REUSE_LOG_0, ("(gen2)Found segment %Ix to reuse without bestfit, with committing end of seg for eph", seg)); } else { if (settings.concurrent) { can_fit = FALSE; commit_end_of_seg = FALSE; } else { size_t additional_space_bestfit = additional_space; if (!has_fit_gen_starts) { if (additional_space_bestfit < (eph_gen_starts + Align (min_obj_size))) { dprintf (SEG_REUSE_LOG_0, ("(gen2)Couldn't fit, gen starts not allocated yet and end space is too small: %Id", additional_space_bestfit)); return FALSE; } bestfit_first_pin = heap_segment_plan_allocated (seg); additional_space_bestfit -= eph_gen_starts; } can_fit = best_fit (free_space, largest_free_space, additional_space_bestfit, &commit_end_of_seg); if (can_fit) { dprintf (SEG_REUSE_LOG_0, ("(gen2)Found segment %Ix to reuse with bestfit, %s committing end of seg", seg, (commit_end_of_seg ? "with" : "without"))); } else { dprintf (SEG_REUSE_LOG_0, ("(gen2)Couldn't fit, total free space is %Ix", (free_space + end_space))); } } } } else { dprintf (SEG_REUSE_LOG_0, ("(gen2)Found segment %Ix to reuse without bestfit, with committing end of seg", seg)); } assert (additional_space <= end_space); if (commit_end_of_seg) { if (!grow_heap_segment (seg, heap_segment_plan_allocated (seg) + additional_space)) { dprintf (2, ("Couldn't commit end of segment?!")); use_bestfit = FALSE; return FALSE; } if (use_bestfit) { // We increase the index here because growing heap segment could create a discrepency with // the additional space we used (could be bigger). 
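            // For example: if, after grow_heap_segment, committed minus plan_allocated is
            // 0x9000 bytes, round_down_power2 yields 0x8000, so the 2^15 free-space bucket
            // (via relative_index_power2_free_space) gets one more saved entry to account
            // for the end-of-segment space bestfit can now hand out.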
size_t free_space_end_of_seg = heap_segment_committed (seg) - heap_segment_plan_allocated (seg); int relative_free_space_index = relative_index_power2_free_space (round_down_power2 (free_space_end_of_seg)); saved_ordered_free_space_indices[relative_free_space_index]++; } } if (use_bestfit) { memcpy (ordered_free_space_indices, saved_ordered_free_space_indices, sizeof(ordered_free_space_indices)); max_free_space_items = max (MIN_NUM_FREE_SPACES, free_space_items * 3 / 2); max_free_space_items = min (MAX_NUM_FREE_SPACES, max_free_space_items); dprintf (SEG_REUSE_LOG_0, ("could fit! %Id free spaces, %Id max", free_space_items, max_free_space_items)); } return can_fit; } dprintf (SEG_REUSE_LOG_0, ("(gen2)Couldn't fit, total free space is %Ix", (free_space + end_space))); return FALSE; } else { assert (settings.condemned_generation == (max_generation-1)); size_t free_space = (end_address - heap_segment_plan_allocated (seg)); size_t largest_free_space = free_space; dprintf (SEG_REUSE_LOG_0, ("can_expand_into_p: gen1: testing segment [%Ix %Ix", first_address, end_address)); //find the first free list in range of the current segment uint8_t* free_list = 0; unsigned int a_l_idx = gen_allocator->first_suitable_bucket(eph_gen_starts); for (; a_l_idx < gen_allocator->number_of_buckets(); a_l_idx++) { free_list = gen_allocator->alloc_list_head_of (a_l_idx); while (free_list) { if ((free_list >= first_address) && (free_list < end_address) && (unused_array_size (free_list) >= eph_gen_starts)) { goto next; } else { free_list = free_list_slot (free_list); } } } next: if (free_list) { init_ordered_free_space_indices (); if (process_free_space (seg, unused_array_size (free_list) - eph_gen_starts + Align (min_obj_size), min_free_size, min_cont_size, &free_space, &largest_free_space)) { return TRUE; } free_list = free_list_slot (free_list); } else { dprintf (SEG_REUSE_LOG_0, ("(gen1)Couldn't fit, no free list")); return FALSE; } //tally up free space while (1) { while (free_list) { if ((free_list >= first_address) && (free_list < end_address) && process_free_space (seg, unused_array_size (free_list), min_free_size, min_cont_size, &free_space, &largest_free_space)) { return TRUE; } free_list = free_list_slot (free_list); } a_l_idx++; if (a_l_idx < gen_allocator->number_of_buckets()) { free_list = gen_allocator->alloc_list_head_of (a_l_idx); } else break; } dprintf (SEG_REUSE_LOG_0, ("(gen1)Couldn't fit, total free space is %Ix", free_space)); return FALSE; /* BOOL can_fit = best_fit (free_space, 0, NULL); if (can_fit) { dprintf (SEG_REUSE_LOG_0, ("(gen1)Found segment %Ix to reuse with bestfit", seg)); } else { dprintf (SEG_REUSE_LOG_0, ("(gen1)Couldn't fit, total free space is %Ix", free_space)); } return can_fit; */ } } void gc_heap::realloc_plug (size_t last_plug_size, uint8_t*& last_plug, generation* gen, uint8_t* start_address, unsigned int& active_new_gen_number, uint8_t*& last_pinned_gap, BOOL& leftp, BOOL shortened_p #ifdef SHORT_PLUGS , mark* pinned_plug_entry #endif //SHORT_PLUGS ) { // detect generation boundaries // make sure that active_new_gen_number is not the youngest generation. // because the generation_limit wouldn't return the right thing in this case. 
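// Concretely: active_new_gen_number starts out one generation too high (see expand_heap),
// and each time last_plug crosses generation_limit (active_new_gen_number) the count is
// decremented and a new plan generation start is recorded via realloc_plan_generation_start,
// so the replanned generation boundaries are laid down as the plugs are walked in address
// order.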
if (!use_bestfit) { if ((active_new_gen_number > 1) && (last_plug >= generation_limit (active_new_gen_number))) { assert (last_plug >= start_address); active_new_gen_number--; realloc_plan_generation_start (generation_of (active_new_gen_number), gen); assert (generation_plan_allocation_start (generation_of (active_new_gen_number))); leftp = FALSE; } } // detect pinned plugs if (!pinned_plug_que_empty_p() && (last_plug == pinned_plug (oldest_pin()))) { size_t entry = deque_pinned_plug(); mark* m = pinned_plug_of (entry); size_t saved_pinned_len = pinned_len(m); pinned_len(m) = last_plug - last_pinned_gap; //dprintf (3,("Adjusting pinned gap: [%Ix, %Ix[", (size_t)last_pinned_gap, (size_t)last_plug)); if (m->has_post_plug_info()) { last_plug_size += sizeof (gap_reloc_pair); dprintf (3, ("ra pinned %Ix was shortened, adjusting plug size to %Ix", last_plug, last_plug_size)) } last_pinned_gap = last_plug + last_plug_size; dprintf (3, ("ra found pin %Ix, len: %Ix->%Ix, last_p: %Ix, last_p_size: %Ix", pinned_plug (m), saved_pinned_len, pinned_len (m), last_plug, last_plug_size)); leftp = FALSE; //we are creating a generation fault. set the cards. { size_t end_card = card_of (align_on_card (last_plug + last_plug_size)); size_t card = card_of (last_plug); while (card != end_card) { set_card (card); card++; } } } else if (last_plug >= start_address) { #ifdef FEATURE_STRUCTALIGN int requiredAlignment; ptrdiff_t pad; node_aligninfo (last_plug, requiredAlignment, pad); // from how we previously aligned the plug's destination address, // compute the actual alignment offset. uint8_t* reloc_plug = last_plug + node_relocation_distance (last_plug); ptrdiff_t alignmentOffset = ComputeStructAlignPad(reloc_plug, requiredAlignment, 0); if (!alignmentOffset) { // allocate_in_expanded_heap doesn't expect alignmentOffset to be zero. alignmentOffset = requiredAlignment; } //clear the alignment info because we are reallocating clear_node_aligninfo (last_plug); #else // FEATURE_STRUCTALIGN //clear the realignment flag because we are reallocating clear_node_realigned (last_plug); #endif // FEATURE_STRUCTALIGN BOOL adjacentp = FALSE; BOOL set_padding_on_saved_p = FALSE; if (shortened_p) { last_plug_size += sizeof (gap_reloc_pair); #ifdef SHORT_PLUGS assert (pinned_plug_entry != NULL); if (last_plug_size <= sizeof (plug_and_gap)) { set_padding_on_saved_p = TRUE; } #endif //SHORT_PLUGS dprintf (3, ("ra plug %Ix was shortened, adjusting plug size to %Ix", last_plug, last_plug_size)) } #ifdef SHORT_PLUGS clear_padding_in_expand (last_plug, set_padding_on_saved_p, pinned_plug_entry); #endif //SHORT_PLUGS uint8_t* new_address = allocate_in_expanded_heap(gen, last_plug_size, adjacentp, last_plug, #ifdef SHORT_PLUGS set_padding_on_saved_p, pinned_plug_entry, #endif //SHORT_PLUGS TRUE, active_new_gen_number REQD_ALIGN_AND_OFFSET_ARG); dprintf (3, ("ra NA: [%Ix, %Ix[: %Ix", new_address, (new_address + last_plug_size), last_plug_size)); assert (new_address); set_node_relocation_distance (last_plug, new_address - last_plug); #ifdef FEATURE_STRUCTALIGN if (leftp && node_alignpad (last_plug) == 0) #else // FEATURE_STRUCTALIGN if (leftp && !node_realigned (last_plug)) #endif // FEATURE_STRUCTALIGN { // TODO - temporarily disable L optimization because of a bug in it. 
//set_node_left (last_plug); } dprintf (3,(" Re-allocating %Ix->%Ix len %Id", (size_t)last_plug, (size_t)new_address, last_plug_size)); leftp = adjacentp; } } void gc_heap::realloc_in_brick (uint8_t* tree, uint8_t*& last_plug, uint8_t* start_address, generation* gen, unsigned int& active_new_gen_number, uint8_t*& last_pinned_gap, BOOL& leftp) { assert (tree != NULL); int left_node = node_left_child (tree); int right_node = node_right_child (tree); dprintf (3, ("ra: tree: %Ix, last_pin_gap: %Ix, last_p: %Ix, L: %d, R: %d", tree, last_pinned_gap, last_plug, left_node, right_node)); if (left_node) { dprintf (3, ("LN: realloc %Ix(%Ix)", (tree + left_node), last_plug)); realloc_in_brick ((tree + left_node), last_plug, start_address, gen, active_new_gen_number, last_pinned_gap, leftp); } if (last_plug != 0) { uint8_t* plug = tree; BOOL has_pre_plug_info_p = FALSE; BOOL has_post_plug_info_p = FALSE; mark* pinned_plug_entry = get_next_pinned_entry (tree, &has_pre_plug_info_p, &has_post_plug_info_p, FALSE); // We only care about the pre plug info 'cause that's what decides if the last plug is shortened. // The pinned plugs are handled in realloc_plug. size_t gap_size = node_gap_size (plug); uint8_t* gap = (plug - gap_size); uint8_t* last_plug_end = gap; size_t last_plug_size = (last_plug_end - last_plug); // Cannot assert this - a plug could be less than that due to the shortened ones. //assert (last_plug_size >= Align (min_obj_size)); dprintf (3, ("ra: plug %Ix, gap size: %Ix, last_pin_gap: %Ix, last_p: %Ix, last_p_end: %Ix, shortened: %d", plug, gap_size, last_pinned_gap, last_plug, last_plug_end, (has_pre_plug_info_p ? 1 : 0))); realloc_plug (last_plug_size, last_plug, gen, start_address, active_new_gen_number, last_pinned_gap, leftp, has_pre_plug_info_p #ifdef SHORT_PLUGS , pinned_plug_entry #endif //SHORT_PLUGS ); } last_plug = tree; if (right_node) { dprintf (3, ("RN: realloc %Ix(%Ix)", (tree + right_node), last_plug)); realloc_in_brick ((tree + right_node), last_plug, start_address, gen, active_new_gen_number, last_pinned_gap, leftp); } } void gc_heap::realloc_plugs (generation* consing_gen, heap_segment* seg, uint8_t* start_address, uint8_t* end_address, unsigned active_new_gen_number) { dprintf (3, ("--- Reallocing ---")); if (use_bestfit) { //make sure that every generation has a planned allocation start int gen_number = max_generation - 1; while (gen_number >= 0) { generation* gen = generation_of (gen_number); if (0 == generation_plan_allocation_start (gen)) { generation_plan_allocation_start (gen) = bestfit_first_pin + (max_generation - gen_number - 1) * Align (min_obj_size); generation_plan_allocation_start_size (gen) = Align (min_obj_size); assert (generation_plan_allocation_start (gen)); } gen_number--; } } uint8_t* first_address = start_address; //Look for the right pinned plug to start from. 
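    // The scan below drops pins that fall outside [planned_ephemeral_seg_end, end_address)
    // and stops at the first pin inside that range; if that pin sits below start_address,
    // first_address (and hence the starting brick) is moved back to it so the walk starts
    // early enough to cover it.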
reset_pinned_queue_bos(); uint8_t* planned_ephemeral_seg_end = heap_segment_plan_allocated (seg); while (!pinned_plug_que_empty_p()) { mark* m = oldest_pin(); if ((pinned_plug (m) >= planned_ephemeral_seg_end) && (pinned_plug (m) < end_address)) { if (pinned_plug (m) < first_address) { first_address = pinned_plug (m); } break; } else deque_pinned_plug(); } size_t current_brick = brick_of (first_address); size_t end_brick = brick_of (end_address-1); uint8_t* last_plug = 0; uint8_t* last_pinned_gap = heap_segment_plan_allocated (seg); BOOL leftp = FALSE; dprintf (3, ("start addr: %Ix, first addr: %Ix, current oldest pin: %Ix", start_address, first_address, pinned_plug (oldest_pin()))); while (current_brick <= end_brick) { int brick_entry = brick_table [ current_brick ]; if (brick_entry >= 0) { realloc_in_brick ((brick_address (current_brick) + brick_entry - 1), last_plug, start_address, consing_gen, active_new_gen_number, last_pinned_gap, leftp); } current_brick++; } if (last_plug != 0) { realloc_plug (end_address - last_plug, last_plug, consing_gen, start_address, active_new_gen_number, last_pinned_gap, leftp, FALSE #ifdef SHORT_PLUGS , NULL #endif //SHORT_PLUGS ); } //Fix the old segment allocated size assert (last_pinned_gap >= heap_segment_mem (seg)); assert (last_pinned_gap <= heap_segment_committed (seg)); heap_segment_plan_allocated (seg) = last_pinned_gap; } void gc_heap::set_expand_in_full_gc (int condemned_gen_number) { if (!should_expand_in_full_gc) { if ((condemned_gen_number != max_generation) && (settings.pause_mode != pause_low_latency) && (settings.pause_mode != pause_sustained_low_latency)) { should_expand_in_full_gc = TRUE; } } } void gc_heap::save_ephemeral_generation_starts() { for (int ephemeral_generation = 0; ephemeral_generation < max_generation; ephemeral_generation++) { saved_ephemeral_plan_start[ephemeral_generation] = generation_plan_allocation_start (generation_of (ephemeral_generation)); saved_ephemeral_plan_start_size[ephemeral_generation] = generation_plan_allocation_start_size (generation_of (ephemeral_generation)); } } generation* gc_heap::expand_heap (int condemned_generation, generation* consing_gen, heap_segment* new_heap_segment) { #ifndef _DEBUG UNREFERENCED_PARAMETER(condemned_generation); #endif //!_DEBUG assert (condemned_generation >= (max_generation -1)); unsigned int active_new_gen_number = max_generation; //Set one too high to get generation gap uint8_t* start_address = generation_limit (max_generation); uint8_t* end_address = heap_segment_allocated (ephemeral_heap_segment); BOOL should_promote_ephemeral = FALSE; ptrdiff_t eph_size = total_ephemeral_size; #ifdef BACKGROUND_GC dprintf(2,("%s: ---- Heap Expansion ----", (gc_heap::background_running_p() ? "FGC" : "NGC"))); #endif //BACKGROUND_GC settings.heap_expansion = TRUE; //reset the elevation state for next time. 
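    // Editorial note (hedged): should_lock_elevation is cleared below only when the expansion
    // does NOT reuse an existing segment; expand_reused_seg_p() (defined later in this file)
    // reports TRUE when gc_heap_expand was recorded as expand_reuse_bestfit or
    // expand_reuse_normal, and in that case the elevation lock is left in place.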
dprintf (2, ("Elevation: elevation = el_none")); if (settings.should_lock_elevation && !expand_reused_seg_p()) settings.should_lock_elevation = FALSE; heap_segment* new_seg = new_heap_segment; if (!new_seg) return consing_gen; //copy the card and brick tables if (g_gc_card_table!= card_table) copy_brick_card_table(); BOOL new_segment_p = (heap_segment_next (new_seg) == 0); dprintf (2, ("new_segment_p %Ix", (size_t)new_segment_p)); assert (generation_plan_allocation_start (generation_of (max_generation-1))); assert (generation_plan_allocation_start (generation_of (max_generation-1)) >= heap_segment_mem (ephemeral_heap_segment)); assert (generation_plan_allocation_start (generation_of (max_generation-1)) <= heap_segment_committed (ephemeral_heap_segment)); assert (generation_plan_allocation_start (youngest_generation)); assert (generation_plan_allocation_start (youngest_generation) < heap_segment_plan_allocated (ephemeral_heap_segment)); if (settings.pause_mode == pause_no_gc) { // We don't reuse for no gc, so the size used on the new eph seg is eph_size. if ((size_t)(heap_segment_reserved (new_seg) - heap_segment_mem (new_seg)) < (eph_size + soh_allocation_no_gc)) should_promote_ephemeral = TRUE; } else { if (!use_bestfit) { should_promote_ephemeral = dt_low_ephemeral_space_p (tuning_deciding_promote_ephemeral); } } if (should_promote_ephemeral) { ephemeral_promotion = TRUE; get_gc_data_per_heap()->set_mechanism (gc_heap_expand, expand_new_seg_ep); dprintf (2, ("promoting ephemeral")); save_ephemeral_generation_starts(); // We also need to adjust free_obj_space (due to padding) here because now young gens' free_obj_space will // belong to gen2. generation* max_gen = generation_of (max_generation); for (int i = 1; i < max_generation; i++) { generation_free_obj_space (max_gen) += generation_free_obj_space (generation_of (i)); dprintf (2, ("[h%d] maxgen freeobj + %Id=%Id", heap_number, generation_free_obj_space (generation_of (i)), generation_free_obj_space (max_gen))); } // TODO: This is actually insufficient - if BACKGROUND_GC is not defined we'd need to commit more // in order to accommodate eph gen starts. Also in the no_gc we should make sure used // is updated correctly. heap_segment_used (new_seg) = heap_segment_committed (new_seg); } else { // commit the new ephemeral segment all at once if it is a new one. if ((eph_size > 0) && new_segment_p) { #ifdef FEATURE_STRUCTALIGN // The destination may require a larger alignment padding than the source. // Assume the worst possible alignment padding. eph_size += ComputeStructAlignPad(heap_segment_mem (new_seg), MAX_STRUCTALIGN, OBJECT_ALIGNMENT_OFFSET); #endif // FEATURE_STRUCTALIGN #ifdef RESPECT_LARGE_ALIGNMENT //Since the generation start can be larger than min_obj_size //The alignment could be switched. 
eph_size += switch_alignment_size(FALSE); #endif //RESPECT_LARGE_ALIGNMENT //Since the generation start can be larger than min_obj_size //Compare the alignment of the first object in gen1 if (grow_heap_segment (new_seg, heap_segment_mem (new_seg) + eph_size) == 0) { fgm_result.set_fgm (fgm_commit_eph_segment, eph_size, FALSE); return consing_gen; } heap_segment_used (new_seg) = heap_segment_committed (new_seg); } //Fix the end of the old ephemeral heap segment heap_segment_plan_allocated (ephemeral_heap_segment) = generation_plan_allocation_start (generation_of (max_generation-1)); dprintf (3, ("Old ephemeral allocated set to %Ix", (size_t)heap_segment_plan_allocated (ephemeral_heap_segment))); } if (new_segment_p) { // TODO - Is this really necessary? We should think about it. //initialize the first brick size_t first_brick = brick_of (heap_segment_mem (new_seg)); set_brick (first_brick, heap_segment_mem (new_seg) - brick_address (first_brick)); } //From this point on, we cannot run out of memory //reset the allocation of the consing generation back to the end of the //old ephemeral segment generation_allocation_limit (consing_gen) = heap_segment_plan_allocated (ephemeral_heap_segment); generation_allocation_pointer (consing_gen) = generation_allocation_limit (consing_gen); generation_allocation_segment (consing_gen) = ephemeral_heap_segment; //clear the generation gap for all of the ephemeral generations { int generation_num = max_generation-1; while (generation_num >= 0) { generation* gen = generation_of (generation_num); generation_plan_allocation_start (gen) = 0; generation_num--; } } heap_segment* old_seg = ephemeral_heap_segment; ephemeral_heap_segment = new_seg; //Note: the ephemeral segment shouldn't be threaded onto the segment chain //because the relocation and compact phases shouldn't see it // set the generation members used by allocate_in_expanded_heap // and switch to ephemeral generation consing_gen = ensure_ephemeral_heap_segment (consing_gen); if (!should_promote_ephemeral) { realloc_plugs (consing_gen, old_seg, start_address, end_address, active_new_gen_number); } if (!use_bestfit) { repair_allocation_in_expanded_heap (consing_gen); } // assert that the generation gap for all of the ephemeral generations were allocated. #ifdef _DEBUG { int generation_num = max_generation-1; while (generation_num >= 0) { generation* gen = generation_of (generation_num); assert (generation_plan_allocation_start (gen)); generation_num--; } } #endif // _DEBUG if (!new_segment_p) { dprintf (2, ("Demoting ephemeral segment")); //demote the entire segment. settings.demotion = TRUE; get_gc_data_per_heap()->set_mechanism_bit (gc_demotion_bit); demotion_low = heap_segment_mem (ephemeral_heap_segment); demotion_high = heap_segment_reserved (ephemeral_heap_segment); } else { demotion_low = MAX_PTR; demotion_high = 0; #ifndef MULTIPLE_HEAPS settings.demotion = FALSE; get_gc_data_per_heap()->clear_mechanism_bit (gc_demotion_bit); #endif //!MULTIPLE_HEAPS } if (!should_promote_ephemeral && new_segment_p) { assert ((ptrdiff_t)total_ephemeral_size <= eph_size); } if (heap_segment_mem (old_seg) == heap_segment_plan_allocated (old_seg)) { // This is to catch when we accidently delete a segment that has pins. 
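        // Editorial note (hedged): reaching this branch with mem == plan_allocated means nothing
        // was planned into the old segment, so any pin recorded anywhere in
        // [heap_segment_mem, heap_segment_reserved) would indicate stale bookkeeping;
        // verify_no_pins (defined below) walks the pin queue under HEAPVERIFY_GC and raises
        // FATAL_GC_ERROR if it finds one in that range.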
verify_no_pins (heap_segment_mem (old_seg), heap_segment_reserved (old_seg)); } verify_no_pins (heap_segment_plan_allocated (old_seg), heap_segment_reserved(old_seg)); dprintf(2,("---- End of Heap Expansion ----")); return consing_gen; } #endif //!USE_REGIONS BOOL gc_heap::expand_reused_seg_p() { #ifdef USE_REGIONS return FALSE; #else BOOL reused_seg = FALSE; int heap_expand_mechanism = gc_data_per_heap.get_mechanism (gc_heap_expand); if ((heap_expand_mechanism == expand_reuse_bestfit) || (heap_expand_mechanism == expand_reuse_normal)) { reused_seg = TRUE; } return reused_seg; #endif //USE_REGIONS } void gc_heap::verify_no_pins (uint8_t* start, uint8_t* end) { #ifdef VERIFY_HEAP if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) { BOOL contains_pinned_plugs = FALSE; size_t mi = 0; mark* m = 0; while (mi != mark_stack_tos) { m = pinned_plug_of (mi); if ((pinned_plug (m) >= start) && (pinned_plug (m) < end)) { contains_pinned_plugs = TRUE; break; } else mi++; } if (contains_pinned_plugs) { FATAL_GC_ERROR(); } } #endif //VERIFY_HEAP } void gc_heap::set_static_data() { static_data* pause_mode_sdata = static_data_table[latency_level]; for (int i = 0; i < total_generation_count; i++) { dynamic_data* dd = dynamic_data_of (i); static_data* sdata = &pause_mode_sdata[i]; dd->sdata = sdata; dd->min_size = sdata->min_size; dprintf (GTC_LOG, ("PM: %d, gen%d: min: %Id, max: %Id, fr_l: %Id, fr_b: %d%%", settings.pause_mode,i, dd->min_size, dd_max_size (dd), sdata->fragmentation_limit, (int)(sdata->fragmentation_burden_limit * 100))); } } // Initialize the values that are not const. void gc_heap::init_static_data() { size_t gen0_min_size = get_gen0_min_size(); size_t gen0_max_size = #ifdef MULTIPLE_HEAPS max (6*1024*1024, min ( Align(soh_segment_size/2), 200*1024*1024)); #else //MULTIPLE_HEAPS ( #ifdef BACKGROUND_GC gc_can_use_concurrent ? 6*1024*1024 : #endif //BACKGROUND_GC max (6*1024*1024, min ( Align(soh_segment_size/2), 200*1024*1024)) ); #endif //MULTIPLE_HEAPS gen0_max_size = max (gen0_min_size, gen0_max_size); if (heap_hard_limit) { size_t gen0_max_size_seg = soh_segment_size / 4; dprintf (GTC_LOG, ("limit gen0 max %Id->%Id", gen0_max_size, gen0_max_size_seg)); gen0_max_size = min (gen0_max_size, gen0_max_size_seg); } size_t gen0_max_size_config = (size_t)GCConfig::GetGCGen0MaxBudget(); if (gen0_max_size_config) { gen0_max_size = min (gen0_max_size, gen0_max_size_config); #ifdef FEATURE_EVENT_TRACE gen0_max_budget_from_config = gen0_max_size; #endif //FEATURE_EVENT_TRACE } gen0_max_size = Align (gen0_max_size); gen0_min_size = min (gen0_min_size, gen0_max_size); // TODO: gen0_max_size has a 200mb cap; gen1_max_size should also have a cap. size_t gen1_max_size = (size_t) #ifdef MULTIPLE_HEAPS max (6*1024*1024, Align(soh_segment_size/2)); #else //MULTIPLE_HEAPS ( #ifdef BACKGROUND_GC gc_can_use_concurrent ? 
6*1024*1024 : #endif //BACKGROUND_GC max (6*1024*1024, Align(soh_segment_size/2)) ); #endif //MULTIPLE_HEAPS size_t gen1_max_size_config = (size_t)GCConfig::GetGCGen1MaxBudget(); if (gen1_max_size_config) { gen1_max_size = min (gen1_max_size, gen1_max_size_config); } gen1_max_size = Align (gen1_max_size); dprintf (GTC_LOG, ("gen0 min: %Id, max: %Id, gen1 max: %Id", gen0_min_size, gen0_max_size, gen1_max_size)); for (int i = latency_level_first; i <= latency_level_last; i++) { static_data_table[i][0].min_size = gen0_min_size; static_data_table[i][0].max_size = gen0_max_size; static_data_table[i][1].max_size = gen1_max_size; } } bool gc_heap::init_dynamic_data() { uint64_t now_raw_ts = RawGetHighPrecisionTimeStamp (); #ifdef HEAP_BALANCE_INSTRUMENTATION start_raw_ts = now_raw_ts; #endif //HEAP_BALANCE_INSTRUMENTATION uint64_t now = (uint64_t)((double)now_raw_ts * qpf_us); set_static_data(); if (heap_number == 0) { process_start_time = now; smoothed_desired_per_heap[0] = dynamic_data_of (0)->min_size; #ifdef HEAP_BALANCE_INSTRUMENTATION last_gc_end_time_us = now; dprintf (HEAP_BALANCE_LOG, ("qpf=%I64d, start: %I64d(%d)", qpf, start_raw_ts, now)); #endif //HEAP_BALANCE_INSTRUMENTATION } for (int i = 0; i < total_generation_count; i++) { dynamic_data* dd = dynamic_data_of (i); dd->gc_clock = 0; dd->time_clock = now; dd->previous_time_clock = now; dd->current_size = 0; dd->promoted_size = 0; dd->collection_count = 0; dd->new_allocation = dd->min_size; dd->gc_new_allocation = dd->new_allocation; dd->desired_allocation = dd->new_allocation; dd->fragmentation = 0; } return true; } float gc_heap::surv_to_growth (float cst, float limit, float max_limit) { if (cst < ((max_limit - limit ) / (limit * (max_limit-1.0f)))) return ((limit - limit*cst) / (1.0f - (cst * limit))); else return max_limit; } //if the allocation budget wasn't exhausted, the new budget may be wrong because the survival may //not be correct (collection happened too soon). Correct with a linear estimation based on the previous //value of the budget static size_t linear_allocation_model (float allocation_fraction, size_t new_allocation, size_t previous_desired_allocation, float time_since_previous_collection_secs) { if ((allocation_fraction < 0.95) && (allocation_fraction > 0.0)) { const float decay_time = 5*60.0f; // previous desired allocation expires over 5 minutes float decay_factor = (decay_time <= time_since_previous_collection_secs) ? 
0 : ((decay_time - time_since_previous_collection_secs) / decay_time); float previous_allocation_factor = (1.0f - allocation_fraction) * decay_factor; dprintf (2, ("allocation fraction: %d, decay factor: %d, previous allocation factor: %d", (int)(allocation_fraction*100.0), (int)(decay_factor*100.0), (int)(previous_allocation_factor*100.0))); new_allocation = (size_t)((1.0 - previous_allocation_factor)*new_allocation + previous_allocation_factor * previous_desired_allocation); } return new_allocation; } size_t gc_heap::desired_new_allocation (dynamic_data* dd, size_t out, int gen_number, int pass) { gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap(); if (dd_begin_data_size (dd) == 0) { size_t new_allocation = dd_min_size (dd); current_gc_data_per_heap->gen_data[gen_number].new_allocation = new_allocation; return new_allocation; } else { float cst; size_t previous_desired_allocation = dd_desired_allocation (dd); size_t current_size = dd_current_size (dd); float max_limit = dd_max_limit (dd); float limit = dd_limit (dd); size_t min_gc_size = dd_min_size (dd); float f = 0; size_t max_size = dd_max_size (dd); size_t new_allocation = 0; float time_since_previous_collection_secs = (dd_time_clock (dd) - dd_previous_time_clock (dd))*1e-6f; float allocation_fraction = (float) (dd_desired_allocation (dd) - dd_gc_new_allocation (dd)) / (float) (dd_desired_allocation (dd)); if (gen_number >= max_generation) { size_t new_size = 0; cst = min (1.0f, float (out) / float (dd_begin_data_size (dd))); f = surv_to_growth (cst, limit, max_limit); if (conserve_mem_setting != 0) { // if this is set, compute a growth factor based on it. // example: a setting of 6 means we have a goal of 60% live data // this means we allow 40% fragmentation // to keep heap size stable, we only use half of that (20%) for new allocation // f is (live data + new allocation)/(live data), so would be (60% + 20%) / 60% or 1.33 float f_conserve = ((10.0f / conserve_mem_setting) - 1) * 0.5f + 1.0f; // use the smaller one f = min (f, f_conserve); } size_t max_growth_size = (size_t)(max_size / f); if (current_size >= max_growth_size) { new_size = max_size; } else { new_size = (size_t) min (max ( (f * current_size), min_gc_size), max_size); } assert ((new_size >= current_size) || (new_size == max_size)); if (gen_number == max_generation) { new_allocation = max((new_size - current_size), min_gc_size); new_allocation = linear_allocation_model (allocation_fraction, new_allocation, dd_desired_allocation (dd), time_since_previous_collection_secs); if ( #ifdef BGC_SERVO_TUNING !bgc_tuning::fl_tuning_triggered && #endif //BGC_SERVO_TUNING (conserve_mem_setting == 0) && (dd_fragmentation (dd) > ((size_t)((f-1)*current_size)))) { //reducing allocation in case of fragmentation size_t new_allocation1 = max (min_gc_size, // CAN OVERFLOW (size_t)((float)new_allocation * current_size / ((float)current_size + 2*dd_fragmentation (dd)))); dprintf (2, ("Reducing max_gen allocation due to fragmentation from %Id to %Id", new_allocation, new_allocation1)); new_allocation = new_allocation1; } } else // not a SOH generation { uint32_t memory_load = 0; uint64_t available_physical = 0; get_memory_info (&memory_load, &available_physical); #ifdef TRACE_GC if (heap_hard_limit) { size_t allocated = 0; size_t committed = uoh_committed_size (gen_number, &allocated); dprintf (1, ("GC#%Id h%d, GMI: UOH budget, UOH commit %Id (obj %Id, frag %Id), total commit: %Id (recorded: %Id)", (size_t)settings.gc_index, heap_number, committed, allocated, 
dd_fragmentation (dynamic_data_of (gen_number)), get_total_committed_size(), (current_total_committed - current_total_committed_bookkeeping))); } #endif //TRACE_GC if (heap_number == 0) settings.exit_memory_load = memory_load; if (available_physical > 1024*1024) available_physical -= 1024*1024; uint64_t available_free = available_physical + (uint64_t)generation_free_list_space (generation_of (gen_number)); if (available_free > (uint64_t)MAX_PTR) { available_free = (uint64_t)MAX_PTR; } //try to avoid OOM during large object allocation new_allocation = max (min(max((new_size - current_size), dd_desired_allocation (dynamic_data_of (max_generation))), (size_t)available_free), max ((current_size/4), min_gc_size)); new_allocation = linear_allocation_model (allocation_fraction, new_allocation, dd_desired_allocation (dd), time_since_previous_collection_secs); } } else { size_t survivors = out; cst = float (survivors) / float (dd_begin_data_size (dd)); f = surv_to_growth (cst, limit, max_limit); new_allocation = (size_t) min (max ((f * (survivors)), min_gc_size), max_size); new_allocation = linear_allocation_model (allocation_fraction, new_allocation, dd_desired_allocation (dd), time_since_previous_collection_secs); if (gen_number == 0) { if (pass == 0) { size_t free_space = generation_free_list_space (generation_of (gen_number)); // DTREVIEW - is min_gc_size really a good choice? // on 64-bit this will almost always be true. dprintf (GTC_LOG, ("frag: %Id, min: %Id", free_space, min_gc_size)); if (free_space > min_gc_size) { settings.gen0_reduction_count = 2; } else { if (settings.gen0_reduction_count > 0) settings.gen0_reduction_count--; } } if (settings.gen0_reduction_count > 0) { dprintf (2, ("Reducing new allocation based on fragmentation")); new_allocation = min (new_allocation, max (min_gc_size, (max_size/3))); } } } size_t new_allocation_ret = Align (new_allocation, get_alignment_constant (gen_number <= max_generation)); int gen_data_index = gen_number; gc_generation_data* gen_data = &(current_gc_data_per_heap->gen_data[gen_data_index]); gen_data->new_allocation = new_allocation_ret; dd_surv (dd) = cst; dprintf (1, (ThreadStressLog::gcDesiredNewAllocationMsg(), heap_number, gen_number, out, current_size, (dd_desired_allocation (dd) - dd_gc_new_allocation (dd)), (int)(cst*100), (int)(f*100), current_size + new_allocation, new_allocation)); return new_allocation_ret; } } // REGIONS TODO: this can be merged with generation_size. 
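// Editorial note (hedged): generation_plan_size below is structurally the same as
// generation_size further down - the only difference is that it reads the *planned* values
// (heap_segment_plan_allocated / generation_plan_allocation_start) instead of the actual ones
// (heap_segment_allocated / generation_allocation_start), which is why the TODO above suggests
// merging the two.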
//returns the planned size of a generation (including free list element) size_t gc_heap::generation_plan_size (int gen_number) { #ifdef USE_REGIONS size_t result = 0; heap_segment* seg = heap_segment_rw (generation_start_segment (generation_of (gen_number))); while (seg) { uint8_t* end = heap_segment_plan_allocated (seg); result += end - heap_segment_mem (seg); dprintf (REGIONS_LOG, ("h%d size + %Id (%Ix - %Ix) -> %Id", heap_number, (end - heap_segment_mem (seg)), heap_segment_mem (seg), end, result)); seg = heap_segment_next (seg); } return result; #else //USE_REGIONS if (0 == gen_number) return max((heap_segment_plan_allocated (ephemeral_heap_segment) - generation_plan_allocation_start (generation_of (gen_number))), (int)Align (min_obj_size)); else { generation* gen = generation_of (gen_number); if (heap_segment_rw (generation_start_segment (gen)) == ephemeral_heap_segment) return (generation_plan_allocation_start (generation_of (gen_number - 1)) - generation_plan_allocation_start (generation_of (gen_number))); else { size_t gensize = 0; heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); PREFIX_ASSUME(seg != NULL); while (seg && (seg != ephemeral_heap_segment)) { gensize += heap_segment_plan_allocated (seg) - heap_segment_mem (seg); seg = heap_segment_next_rw (seg); } if (seg) { gensize += (generation_plan_allocation_start (generation_of (gen_number - 1)) - heap_segment_mem (ephemeral_heap_segment)); } return gensize; } } #endif //USE_REGIONS } //returns the size of a generation (including free list element) size_t gc_heap::generation_size (int gen_number) { #ifdef USE_REGIONS size_t result = 0; heap_segment* seg = heap_segment_rw (generation_start_segment (generation_of (gen_number))); while (seg) { uint8_t* end = heap_segment_allocated (seg); result += end - heap_segment_mem (seg); dprintf (2, ("h%d size + %Id (%Ix - %Ix) -> %Id", heap_number, (end - heap_segment_mem (seg)), heap_segment_mem (seg), end, result)); seg = heap_segment_next (seg); } return result; #else //USE_REGIONS if (0 == gen_number) return max((heap_segment_allocated (ephemeral_heap_segment) - generation_allocation_start (generation_of (gen_number))), (int)Align (min_obj_size)); else { generation* gen = generation_of (gen_number); if (heap_segment_rw (generation_start_segment (gen)) == ephemeral_heap_segment) return (generation_allocation_start (generation_of (gen_number - 1)) - generation_allocation_start (generation_of (gen_number))); else { size_t gensize = 0; heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); PREFIX_ASSUME(seg != NULL); while (seg && (seg != ephemeral_heap_segment)) { gensize += heap_segment_allocated (seg) - heap_segment_mem (seg); seg = heap_segment_next_rw (seg); } if (seg) { gensize += (generation_allocation_start (generation_of (gen_number - 1)) - heap_segment_mem (ephemeral_heap_segment)); } return gensize; } } #endif //USE_REGIONS } size_t gc_heap::compute_in (int gen_number) { assert (gen_number != 0); dynamic_data* dd = dynamic_data_of (gen_number); size_t in = generation_allocation_size (generation_of (gen_number)); if (gen_number == max_generation && ephemeral_promotion) { in = 0; for (int i = 0; i <= max_generation; i++) { dynamic_data* dd = dynamic_data_of (i); in += dd_survived_size (dd); if (i != max_generation) { generation_condemned_allocated (generation_of (gen_number)) += dd_survived_size (dd); } } } dd_gc_new_allocation (dd) -= in; dd_new_allocation (dd) = dd_gc_new_allocation (dd); gc_history_per_heap* current_gc_data_per_heap = 
get_gc_data_per_heap(); gc_generation_data* gen_data = &(current_gc_data_per_heap->gen_data[gen_number]); gen_data->in = in; generation_allocation_size (generation_of (gen_number)) = 0; return in; } void gc_heap::compute_promoted_allocation (int gen_number) { compute_in (gen_number); } #ifdef HOST_64BIT inline size_t gc_heap::trim_youngest_desired (uint32_t memory_load, size_t total_new_allocation, size_t total_min_allocation) { if (memory_load < MAX_ALLOWED_MEM_LOAD) { // If the total of memory load and gen0 budget exceeds // our max memory load limit, trim the gen0 budget so the total // is the max memory load limit. size_t remain_memory_load = (MAX_ALLOWED_MEM_LOAD - memory_load) * mem_one_percent; return min (total_new_allocation, remain_memory_load); } else { size_t total_max_allocation = max (mem_one_percent, total_min_allocation); return min (total_new_allocation, total_max_allocation); } } size_t gc_heap::joined_youngest_desired (size_t new_allocation) { dprintf (2, ("Entry memory load: %d; gen0 new_alloc: %Id", settings.entry_memory_load, new_allocation)); size_t final_new_allocation = new_allocation; if (new_allocation > MIN_YOUNGEST_GEN_DESIRED) { uint32_t num_heaps = 1; #ifdef MULTIPLE_HEAPS num_heaps = gc_heap::n_heaps; #endif //MULTIPLE_HEAPS size_t total_new_allocation = new_allocation * num_heaps; size_t total_min_allocation = MIN_YOUNGEST_GEN_DESIRED * num_heaps; if ((settings.entry_memory_load >= MAX_ALLOWED_MEM_LOAD) || (total_new_allocation > max (youngest_gen_desired_th, total_min_allocation))) { uint32_t memory_load = 0; get_memory_info (&memory_load); settings.exit_memory_load = memory_load; dprintf (2, ("Current memory load: %d", memory_load)); size_t final_total = trim_youngest_desired (memory_load, total_new_allocation, total_min_allocation); size_t max_new_allocation = #ifdef MULTIPLE_HEAPS dd_max_size (g_heaps[0]->dynamic_data_of (0)); #else //MULTIPLE_HEAPS dd_max_size (dynamic_data_of (0)); #endif //MULTIPLE_HEAPS final_new_allocation = min (Align ((final_total / num_heaps), get_alignment_constant (TRUE)), max_new_allocation); } } if (final_new_allocation < new_allocation) { settings.gen0_reduction_count = 2; } return final_new_allocation; } #endif // HOST_64BIT inline gc_history_global* gc_heap::get_gc_data_global() { #ifdef BACKGROUND_GC return (settings.concurrent ? &bgc_data_global : &gc_data_global); #else return &gc_data_global; #endif //BACKGROUND_GC } inline gc_history_per_heap* gc_heap::get_gc_data_per_heap() { #ifdef BACKGROUND_GC return (settings.concurrent ? &bgc_data_per_heap : &gc_data_per_heap); #else return &gc_data_per_heap; #endif //BACKGROUND_GC } void gc_heap::compute_new_dynamic_data (int gen_number) { PREFIX_ASSUME(gen_number >= 0); PREFIX_ASSUME(gen_number <= max_generation); dynamic_data* dd = dynamic_data_of (gen_number); generation* gen = generation_of (gen_number); size_t in = (gen_number==0) ? 
0 : compute_in (gen_number); size_t total_gen_size = generation_size (gen_number); //keep track of fragmentation dd_fragmentation (dd) = generation_free_list_space (gen) + generation_free_obj_space (gen); dd_current_size (dd) = total_gen_size - dd_fragmentation (dd); gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap(); size_t out = dd_survived_size (dd); gc_generation_data* gen_data = &(current_gc_data_per_heap->gen_data[gen_number]); gen_data->size_after = total_gen_size; gen_data->free_list_space_after = generation_free_list_space (gen); gen_data->free_obj_space_after = generation_free_obj_space (gen); if ((settings.pause_mode == pause_low_latency) && (gen_number <= 1)) { // When we are in the low latency mode, we can still be // condemning more than gen1's 'cause of induced GCs. dd_desired_allocation (dd) = low_latency_alloc; dd_gc_new_allocation (dd) = dd_desired_allocation (dd); dd_new_allocation (dd) = dd_gc_new_allocation (dd); } else { if (gen_number == 0) { //compensate for dead finalizable objects promotion. //they shoudn't be counted for growth. size_t final_promoted = 0; final_promoted = min (finalization_promoted_bytes, out); // Prefast: this is clear from above but prefast needs to be told explicitly PREFIX_ASSUME(final_promoted <= out); dprintf (2, ("gen: %d final promoted: %Id", gen_number, final_promoted)); dd_freach_previous_promotion (dd) = final_promoted; size_t lower_bound = desired_new_allocation (dd, out-final_promoted, gen_number, 0); if (settings.condemned_generation == 0) { //there is no noise. dd_desired_allocation (dd) = lower_bound; } else { size_t higher_bound = desired_new_allocation (dd, out, gen_number, 1); // <TODO>This assert was causing AppDomains\unload\test1n\test1nrun.bat to fail</TODO> //assert ( lower_bound <= higher_bound); //discount the noise. Change the desired allocation //only if the previous value is outside of the range. 
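                // Editorial example (hedged, hypothetical numbers): if lower_bound is 10 MB and
                // higher_bound is 14 MB, a previous desired allocation of 12 MB is kept as-is,
                // while 8 MB is raised to 10 MB and 20 MB is clamped down to 14 MB. Only values
                // outside [lower_bound, higher_bound] are adjusted, which filters per-GC noise
                // out of the gen0 budget.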
if (dd_desired_allocation (dd) < lower_bound) { dd_desired_allocation (dd) = lower_bound; } else if (dd_desired_allocation (dd) > higher_bound) { dd_desired_allocation (dd) = higher_bound; } #if defined (HOST_64BIT) && !defined (MULTIPLE_HEAPS) dd_desired_allocation (dd) = joined_youngest_desired (dd_desired_allocation (dd)); #endif // HOST_64BIT && !MULTIPLE_HEAPS trim_youngest_desired_low_memory(); dprintf (2, ("final gen0 new_alloc: %Id", dd_desired_allocation (dd))); } } else { dd_desired_allocation (dd) = desired_new_allocation (dd, out, gen_number, 0); } dd_gc_new_allocation (dd) = dd_desired_allocation (dd); // we may have had some incoming objects during this GC - // adjust the consumed budget for these dd_new_allocation (dd) = dd_gc_new_allocation (dd) - in; } gen_data->pinned_surv = dd_pinned_survived_size (dd); gen_data->npinned_surv = dd_survived_size (dd) - dd_pinned_survived_size (dd); dd_promoted_size (dd) = out; if (gen_number == max_generation) { for (int i = (gen_number + 1); i < total_generation_count; i++) { dd = dynamic_data_of (i); total_gen_size = generation_size (i); generation* gen = generation_of (i); dd_fragmentation (dd) = generation_free_list_space (gen) + generation_free_obj_space (gen); dd_current_size (dd) = total_gen_size - dd_fragmentation (dd); dd_survived_size (dd) = dd_current_size (dd); in = 0; out = dd_current_size (dd); dd_desired_allocation (dd) = desired_new_allocation (dd, out, i, 0); dd_gc_new_allocation (dd) = Align (dd_desired_allocation (dd), get_alignment_constant (FALSE)); dd_new_allocation (dd) = dd_gc_new_allocation (dd); gen_data = &(current_gc_data_per_heap->gen_data[i]); gen_data->size_after = total_gen_size; gen_data->free_list_space_after = generation_free_list_space (gen); gen_data->free_obj_space_after = generation_free_obj_space (gen); gen_data->npinned_surv = out; #ifdef BACKGROUND_GC if (i == loh_generation) end_loh_size = total_gen_size; if (i == poh_generation) end_poh_size = total_gen_size; #endif //BACKGROUND_GC dd_promoted_size (dd) = out; } } } void gc_heap::trim_youngest_desired_low_memory() { if (g_low_memory_status) { size_t committed_mem = committed_size(); dynamic_data* dd = dynamic_data_of (0); size_t current = dd_desired_allocation (dd); size_t candidate = max (Align ((committed_mem / 10), get_alignment_constant(FALSE)), dd_min_size (dd)); dd_desired_allocation (dd) = min (current, candidate); } } ptrdiff_t gc_heap::estimate_gen_growth (int gen_number) { dynamic_data* dd_gen = dynamic_data_of (gen_number); generation *gen = generation_of (gen_number); ptrdiff_t new_allocation_gen = dd_new_allocation (dd_gen); ptrdiff_t free_list_space_gen = generation_free_list_space (gen); #ifdef USE_REGIONS // in the case of regions, we assume all the space up to reserved gets used before we get a new region for this gen ptrdiff_t reserved_not_in_use = 0; ptrdiff_t allocated_gen = 0; for (heap_segment* region = generation_start_segment_rw (gen); region != nullptr; region = heap_segment_next (region)) { allocated_gen += heap_segment_allocated (region) - heap_segment_mem (region); reserved_not_in_use += heap_segment_reserved (region) - heap_segment_allocated (region); } // compute how much of the allocated space is on the free list double free_list_fraction_gen = (allocated_gen == 0) ? 0.0 : (double)(free_list_space_gen) / (double)allocated_gen; // estimate amount of usable free space // e.g. if 90% of the allocated space is free, assume 90% of these 90% can get used // e.g. 
if 10% of the allocated space is free, assume 10% of these 10% can get used ptrdiff_t usable_free_space = (ptrdiff_t)(free_list_fraction_gen * free_list_space_gen); ptrdiff_t budget_gen = new_allocation_gen - usable_free_space - reserved_not_in_use; dprintf(1, ("h%2d gen %d budget %8Id allocated: %8Id, FL: %8Id, reserved_not_in_use %8Id budget_gen %8Id", heap_number, gen_number, new_allocation_gen, allocated_gen, free_list_space_gen, reserved_not_in_use, budget_gen)); #else //USE_REGIONS // estimate how we are going to need in this generation - estimate half the free list space gets used ptrdiff_t budget_gen = new_allocation_gen - (free_list_space_gen / 2); dprintf (REGIONS_LOG, ("budget for gen %d on heap %d is %Id (new %Id, free %Id)", gen_number, heap_number, budget_gen, new_allocation_gen, free_list_space_gen)); #endif //USE_REGIONS return budget_gen; } void gc_heap::decommit_ephemeral_segment_pages() { if (settings.concurrent || use_large_pages_p || (settings.pause_mode == pause_no_gc)) { return; } #if defined(MULTIPLE_HEAPS) && defined(USE_REGIONS) for (int gen_number = soh_gen0; gen_number <= soh_gen1; gen_number++) { generation *gen = generation_of (gen_number); heap_segment* tail_region = generation_tail_region (gen); uint8_t* previous_decommit_target = heap_segment_decommit_target (tail_region); // reset the decommit targets to make sure we don't decommit inadvertently for (heap_segment* region = generation_start_segment_rw (gen); region != nullptr; region = heap_segment_next (region)) { heap_segment_decommit_target (region) = heap_segment_reserved (region); } ptrdiff_t budget_gen = estimate_gen_growth (gen_number) + loh_size_threshold; if (budget_gen >= 0) { // we need more than the regions we have - nothing to decommit continue; } // we may have too much committed - let's see if we can decommit in the tail region ptrdiff_t tail_region_size = heap_segment_reserved (tail_region) - heap_segment_mem (tail_region); ptrdiff_t unneeded_tail_size = min (-budget_gen, tail_region_size); uint8_t *decommit_target = heap_segment_reserved (tail_region) - unneeded_tail_size; decommit_target = max (decommit_target, heap_segment_allocated (tail_region)); if (decommit_target < previous_decommit_target) { // we used to have a higher target - do exponential smoothing by computing // essentially decommit_target = 1/3*decommit_target + 2/3*previous_decommit_target // computation below is slightly different to avoid overflow ptrdiff_t target_decrease = previous_decommit_target - decommit_target; decommit_target += target_decrease * 2 / 3; } //#define STRESS_DECOMMIT 1 #ifdef STRESS_DECOMMIT // our decommit logic should work for a random decommit target within tail_region - make sure it does decommit_target = heap_segment_mem (tail_region) + gc_rand::get_rand (heap_segment_reserved (tail_region) - heap_segment_mem (tail_region)); #endif //STRESS_DECOMMIT heap_segment_decommit_target (tail_region) = decommit_target; if (decommit_target < heap_segment_committed (tail_region)) { gradual_decommit_in_progress_p = TRUE; dprintf (1, ("h%2d gen %d reduce_commit by %IdkB", heap_number, gen_number, (heap_segment_committed (tail_region) - decommit_target)/1024)); } dprintf(3, ("h%2d gen %d allocated: %IdkB committed: %IdkB target: %IdkB", heap_number, gen_number, (heap_segment_allocated (tail_region) - heap_segment_mem (tail_region))/1024, (heap_segment_committed (tail_region) - heap_segment_mem (tail_region))/1024, (decommit_target - heap_segment_mem (tail_region))/1024)); } #else //MULTIPLE_HEAPS && 
USE_REGIONS dynamic_data* dd0 = dynamic_data_of (0); ptrdiff_t desired_allocation = dd_new_allocation (dd0) + max (estimate_gen_growth (soh_gen1), 0) + loh_size_threshold; size_t slack_space = #ifdef HOST_64BIT max(min(min(soh_segment_size/32, dd_max_size (dd0)), (generation_size (max_generation) / 10)), (size_t)desired_allocation); #else #ifdef FEATURE_CORECLR desired_allocation; #else dd_max_size (dd0); #endif //FEATURE_CORECLR #endif // HOST_64BIT uint8_t *decommit_target = heap_segment_allocated (ephemeral_heap_segment) + slack_space; if (decommit_target < heap_segment_decommit_target (ephemeral_heap_segment)) { // we used to have a higher target - do exponential smoothing by computing // essentially decommit_target = 1/3*decommit_target + 2/3*previous_decommit_target // computation below is slightly different to avoid overflow ptrdiff_t target_decrease = heap_segment_decommit_target (ephemeral_heap_segment) - decommit_target; decommit_target += target_decrease * 2 / 3; } heap_segment_decommit_target (ephemeral_heap_segment) = decommit_target; #ifdef MULTIPLE_HEAPS if (decommit_target < heap_segment_committed (ephemeral_heap_segment)) { gradual_decommit_in_progress_p = TRUE; } #ifdef _DEBUG // these are only for checking against logic errors ephemeral_heap_segment->saved_committed = heap_segment_committed (ephemeral_heap_segment); ephemeral_heap_segment->saved_desired_allocation = dd_desired_allocation (dd0); #endif // _DEBUG #endif // MULTIPLE_HEAPS #ifndef MULTIPLE_HEAPS // we want to limit the amount of decommit we do per time to indirectly // limit the amount of time spent in recommit and page faults size_t ephemeral_elapsed = (size_t)((dd_time_clock (dd0) - gc_last_ephemeral_decommit_time) / 1000); gc_last_ephemeral_decommit_time = dd_time_clock (dd0); // this is the amount we were planning to decommit ptrdiff_t decommit_size = heap_segment_committed (ephemeral_heap_segment) - decommit_target; // we do a max of DECOMMIT_SIZE_PER_MILLISECOND per millisecond of elapsed time since the last GC // we limit the elapsed time to 10 seconds to avoid spending too much time decommitting ptrdiff_t max_decommit_size = min (ephemeral_elapsed, (10*1000)) * DECOMMIT_SIZE_PER_MILLISECOND; decommit_size = min (decommit_size, max_decommit_size); slack_space = heap_segment_committed (ephemeral_heap_segment) - heap_segment_allocated (ephemeral_heap_segment) - decommit_size; decommit_heap_segment_pages (ephemeral_heap_segment, slack_space); #endif // !MULTIPLE_HEAPS gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap(); current_gc_data_per_heap->extra_gen0_committed = heap_segment_committed (ephemeral_heap_segment) - heap_segment_allocated (ephemeral_heap_segment); #endif //MULTIPLE_HEAPS && USE_REGIONS } // return true if we actually decommitted anything bool gc_heap::decommit_step () { size_t decommit_size = 0; #ifdef USE_REGIONS const size_t max_decommit_step_size = DECOMMIT_SIZE_PER_MILLISECOND * DECOMMIT_TIME_STEP_MILLISECONDS; for (int kind = basic_free_region; kind < count_free_region_kinds; kind++) { dprintf (REGIONS_LOG, ("decommit_step %d, regions_to_decommit = %Id", kind, global_regions_to_decommit[kind].get_num_free_regions())); while (global_regions_to_decommit[kind].get_num_free_regions() > 0) { heap_segment* region = global_regions_to_decommit[kind].unlink_region_front(); uint8_t* page_start = align_lower_page(get_region_start(region)); uint8_t* end = use_large_pages_p ? 
heap_segment_used(region) : heap_segment_committed(region); size_t size = end - page_start; bool decommit_succeeded_p = false; if (!use_large_pages_p) { decommit_succeeded_p = virtual_decommit(page_start, size, heap_segment_oh(region), 0); dprintf(REGIONS_LOG, ("decommitted region %Ix(%Ix-%Ix) (%Iu bytes) - success: %d", region, page_start, end, size, decommit_succeeded_p)); } if (!decommit_succeeded_p) { memclr(page_start, size); dprintf(REGIONS_LOG, ("cleared region %Ix(%Ix-%Ix) (%Iu bytes)", region, page_start, end, size)); } global_region_allocator.delete_region(get_region_start(region)); decommit_size += size; if (decommit_size >= max_decommit_step_size) { return true; } } } if (use_large_pages_p) { return (decommit_size != 0); } #endif //USE_REGIONS #ifdef MULTIPLE_HEAPS // should never get here for large pages because decommit_ephemeral_segment_pages // will not do anything if use_large_pages_p is true assert(!use_large_pages_p); for (int i = 0; i < n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; decommit_size += hp->decommit_ephemeral_segment_pages_step (); } #endif //MULTIPLE_HEAPS return (decommit_size != 0); } #ifdef MULTIPLE_HEAPS // return the decommitted size size_t gc_heap::decommit_ephemeral_segment_pages_step () { size_t size = 0; #ifdef USE_REGIONS for (int gen_number = soh_gen0; gen_number <= soh_gen1; gen_number++) { generation* gen = generation_of (gen_number); heap_segment* seg = generation_tail_region (gen); #else // USE_REGIONS { heap_segment* seg = ephemeral_heap_segment; // we rely on desired allocation not being changed outside of GC assert (seg->saved_desired_allocation == dd_desired_allocation (dynamic_data_of (0))); #endif // USE_REGIONS uint8_t* decommit_target = heap_segment_decommit_target (seg); size_t EXTRA_SPACE = 2 * OS_PAGE_SIZE; decommit_target += EXTRA_SPACE; #ifdef STRESS_DECOMMIT // our decommit logic should work for a random decommit target within tail_region - make sure it does // tail region now may be different from what decommit_ephemeral_segment_pages saw decommit_target = heap_segment_mem (seg) + gc_rand::get_rand (heap_segment_reserved (seg) - heap_segment_mem (seg)); #endif //STRESS_DECOMMIT uint8_t* committed = heap_segment_committed (seg); uint8_t* allocated = (seg == ephemeral_heap_segment) ? alloc_allocated : heap_segment_allocated (seg); if ((allocated <= decommit_target) && (decommit_target < committed)) { #ifdef USE_REGIONS if (gen_number == soh_gen0) { // for gen 0, sync with the allocator by taking the more space lock // and re-read the variables // // we call try_enter_spin_lock here instead of enter_spin_lock because // calling enter_spin_lock from this thread can deadlock at the start // of a GC - if gc_started is already true, we call wait_for_gc_done(), // but we are on GC thread 0, so GC cannot make progress if (!try_enter_spin_lock (&more_space_lock_soh)) { continue; } add_saved_spinlock_info (false, me_acquire, mt_decommit_step); seg = generation_tail_region (gen); #ifndef STRESS_DECOMMIT decommit_target = heap_segment_decommit_target (seg); decommit_target += EXTRA_SPACE; #endif committed = heap_segment_committed (seg); allocated = (seg == ephemeral_heap_segment) ? 
alloc_allocated : heap_segment_allocated (seg); } if ((allocated <= decommit_target) && (decommit_target < committed)) #else // USE_REGIONS // we rely on other threads not messing with committed if we are about to trim it down assert (seg->saved_committed == heap_segment_committed (seg)); #endif // USE_REGIONS { // how much would we need to decommit to get to decommit_target in one step? size_t full_decommit_size = (committed - decommit_target); // don't do more than max_decommit_step_size per step size_t decommit_size = min (max_decommit_step_size, full_decommit_size); // figure out where the new committed should be uint8_t* new_committed = (committed - decommit_size); size += decommit_heap_segment_pages_worker (seg, new_committed); #ifdef _DEBUG seg->saved_committed = committed - size; #endif // _DEBUG } #ifdef USE_REGIONS if (gen_number == soh_gen0) { // for gen 0, we took the more space lock - leave it again add_saved_spinlock_info (false, me_release, mt_decommit_step); leave_spin_lock (&more_space_lock_soh); } #endif // USE_REGIONS } } return size; } #endif //MULTIPLE_HEAPS //This is meant to be called by decide_on_compacting. size_t gc_heap::generation_fragmentation (generation* gen, generation* consing_gen, uint8_t* end) { ptrdiff_t frag = 0; #ifdef USE_REGIONS for (int gen_num = 0; gen_num <= gen->gen_num; gen_num++) { generation* gen = generation_of (gen_num); heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); while (seg) { frag += (heap_segment_saved_allocated (seg) - heap_segment_plan_allocated (seg)); dprintf (3, ("h%d g%d adding seg plan frag: %Ix-%Ix=%Id -> %Id", heap_number, gen_num, heap_segment_saved_allocated (seg), heap_segment_plan_allocated (seg), (heap_segment_saved_allocated (seg) - heap_segment_plan_allocated (seg)), frag)); seg = heap_segment_next_rw (seg); } } #else //USE_REGIONS uint8_t* alloc = generation_allocation_pointer (consing_gen); // If the allocation pointer has reached the ephemeral segment // fine, otherwise the whole ephemeral segment is considered // fragmentation if (in_range_for_segment (alloc, ephemeral_heap_segment)) { if (alloc <= heap_segment_allocated(ephemeral_heap_segment)) frag = end - alloc; else { // case when no survivors, allocated set to beginning frag = 0; } dprintf (3, ("ephemeral frag: %Id", frag)); } else frag = (heap_segment_allocated (ephemeral_heap_segment) - heap_segment_mem (ephemeral_heap_segment)); heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); PREFIX_ASSUME(seg != NULL); while (seg != ephemeral_heap_segment) { frag += (heap_segment_allocated (seg) - heap_segment_plan_allocated (seg)); dprintf (3, ("seg: %Ix, frag: %Id", (size_t)seg, (heap_segment_allocated (seg) - heap_segment_plan_allocated (seg)))); seg = heap_segment_next_rw (seg); assert (seg); } #endif //USE_REGIONS dprintf (3, ("frag: %Id discounting pinned plugs", frag)); //add the length of the dequeued plug free space size_t bos = 0; while (bos < mark_stack_bos) { frag += (pinned_len (pinned_plug_of (bos))); dprintf (3, ("adding pinned len %Id to frag ->%Id", pinned_len (pinned_plug_of (bos)), frag)); bos++; } return frag; } // for SOH this returns the total sizes of the generation and its // younger generation(s). // for LOH this returns just LOH size. size_t gc_heap::generation_sizes (generation* gen, bool use_saved_p) { size_t result = 0; #ifdef USE_REGIONS int gen_num = gen->gen_num; int start_gen_index = ((gen_num > max_generation) ? 
gen_num : 0); for (int i = start_gen_index; i <= gen_num; i++) { heap_segment* seg = heap_segment_in_range (generation_start_segment (generation_of (i))); while (seg) { uint8_t* end = (use_saved_p ? heap_segment_saved_allocated (seg) : heap_segment_allocated (seg)); result += end - heap_segment_mem (seg); dprintf (3, ("h%d gen%d size + %Id (%Ix - %Ix) -> %Id", heap_number, i, (end - heap_segment_mem (seg)), heap_segment_mem (seg), end, result)); seg = heap_segment_next (seg); } } #else //USE_REGIONS if (generation_start_segment (gen ) == ephemeral_heap_segment) result = (heap_segment_allocated (ephemeral_heap_segment) - generation_allocation_start (gen)); else { heap_segment* seg = heap_segment_in_range (generation_start_segment (gen)); PREFIX_ASSUME(seg != NULL); while (seg) { result += (heap_segment_allocated (seg) - heap_segment_mem (seg)); seg = heap_segment_next_in_range (seg); } } #endif //USE_REGIONS return result; } #ifdef USE_REGIONS bool gc_heap::decide_on_compaction_space() { size_t gen0size = approximate_new_allocation(); dprintf (REGIONS_LOG, ("gen0size: %Id, free: %Id", gen0size, (num_regions_freed_in_sweep * ((size_t)1 << min_segment_size_shr)))); // If we don't compact, would we have enough space? if (sufficient_space_regions ((num_regions_freed_in_sweep * ((size_t)1 << min_segment_size_shr)), gen0size)) { dprintf (REGIONS_LOG, ("it is sufficient!")); return false; } // If we do compact, would we have enough space? get_gen0_end_plan_space(); if (!gen0_large_chunk_found) { gen0_large_chunk_found = (free_regions[basic_free_region].get_num_free_regions() > 0); } dprintf (REGIONS_LOG, ("gen0_pinned_free_space: %Id, end_gen0_region_space: %Id, gen0size: %Id", gen0_pinned_free_space, end_gen0_region_space, gen0size)); if (sufficient_space_regions ((gen0_pinned_free_space + end_gen0_region_space), gen0size) && gen0_large_chunk_found) { sufficient_gen0_space_p = TRUE; } return true; } #endif //USE_REGIONS size_t gc_heap::estimated_reclaim (int gen_number) { dynamic_data* dd = dynamic_data_of (gen_number); size_t gen_allocated = (dd_desired_allocation (dd) - dd_new_allocation (dd)); size_t gen_total_size = gen_allocated + dd_current_size (dd); size_t est_gen_surv = (size_t)((float) (gen_total_size) * dd_surv (dd)); size_t est_gen_free = gen_total_size - est_gen_surv + dd_fragmentation (dd); dprintf (GTC_LOG, ("h%d gen%d total size: %Id, est dead space: %Id (s: %d, allocated: %Id), frag: %Id", heap_number, gen_number, gen_total_size, est_gen_free, (int)(dd_surv (dd) * 100), gen_allocated, dd_fragmentation (dd))); return est_gen_free; } bool gc_heap::is_full_compacting_gc_productive() { #ifdef USE_REGIONS // If we needed to grow gen2 by extending either the end of its tail region // or having to acquire more regions for gen2, then we view this as unproductive. // // Note that when we freely choose which region to demote and promote, this calculation // will need to change. 
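    // Editorial note (hedged): under regions the productivity check is two-fold - if gen1's
    // start region has been planned into gen2, or the existing gen2 tail region had to grow
    // (its planned allocated end is at or past its current allocated end), the full compacting
    // GC did not shrink gen2 and is treated as unproductive, which feeds the
    // should_lock_elevation decision in decide_on_compacting.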
heap_segment* gen1_start_region = generation_start_segment (generation_of (max_generation - 1)); if (heap_segment_plan_gen_num (gen1_start_region) == max_generation) { dprintf (REGIONS_LOG, ("gen1 start region %Ix is now part of gen2, unproductive", heap_segment_mem (gen1_start_region))); return false; } else { heap_segment* gen2_tail_region = generation_tail_region (generation_of (max_generation)); if (heap_segment_plan_allocated (gen2_tail_region) >= heap_segment_allocated (gen2_tail_region)) { dprintf (REGIONS_LOG, ("last gen2 region extended %Ix->%Ix, unproductive", heap_segment_allocated (gen2_tail_region), heap_segment_plan_allocated (gen2_tail_region))); return false; } } return true; #else //USE_REGIONS if (generation_plan_allocation_start (generation_of (max_generation - 1)) >= generation_allocation_start (generation_of (max_generation - 1))) { dprintf (1, ("gen1 start %Ix->%Ix, gen2 size %Id->%Id, lock elevation", generation_allocation_start (generation_of (max_generation - 1)), generation_plan_allocation_start (generation_of (max_generation - 1)), generation_size (max_generation), generation_plan_size (max_generation))); return false; } else return true; #endif //USE_REGIONS } BOOL gc_heap::decide_on_compacting (int condemned_gen_number, size_t fragmentation, BOOL& should_expand) { BOOL should_compact = FALSE; should_expand = FALSE; generation* gen = generation_of (condemned_gen_number); dynamic_data* dd = dynamic_data_of (condemned_gen_number); size_t gen_sizes = generation_sizes(gen, true); float fragmentation_burden = ( ((0 == fragmentation) || (0 == gen_sizes)) ? (0.0f) : (float (fragmentation) / gen_sizes) ); dprintf (GTC_LOG, ("h%d g%d fragmentation: %Id (%d%%), gen_sizes: %Id", heap_number, settings.condemned_generation, fragmentation, (int)(fragmentation_burden * 100.0), gen_sizes)); #ifdef USE_REGIONS if (special_sweep_p) { last_gc_before_oom = FALSE; return FALSE; } #endif //USE_REGIONS #if defined(STRESS_HEAP) && !defined(FEATURE_REDHAWK) // for GC stress runs we need compaction if (GCStress<cfg_any>::IsEnabled() && !settings.concurrent) should_compact = TRUE; #endif //defined(STRESS_HEAP) && !defined(FEATURE_REDHAWK) if (GCConfig::GetForceCompact()) should_compact = TRUE; if ((condemned_gen_number == max_generation) && last_gc_before_oom) { should_compact = TRUE; last_gc_before_oom = FALSE; get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_last_gc); } if (settings.reason == reason_induced_compacting) { dprintf (2, ("induced compacting GC")); should_compact = TRUE; get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_induced_compacting); } if (settings.reason == reason_pm_full_gc) { assert (condemned_gen_number == max_generation); if (heap_number == 0) { dprintf (GTC_LOG, ("PM doing compacting full GC after a gen1")); } should_compact = TRUE; } dprintf (2, ("Fragmentation: %d Fragmentation burden %d%%", fragmentation, (int) (100*fragmentation_burden))); if (provisional_mode_triggered && (condemned_gen_number == (max_generation - 1))) { dprintf (GTC_LOG, ("gen1 in PM always compact")); should_compact = TRUE; } #ifdef USE_REGIONS if (!should_compact) { should_compact = !!decide_on_compaction_space(); } #else //USE_REGIONS if (!should_compact) { if (dt_low_ephemeral_space_p (tuning_deciding_compaction)) { dprintf(GTC_LOG, ("compacting due to low ephemeral")); should_compact = TRUE; get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_low_ephemeral); } } if (should_compact) { if ((condemned_gen_number >= (max_generation - 1))) { if 
(dt_low_ephemeral_space_p (tuning_deciding_expansion)) { dprintf (GTC_LOG,("Not enough space for all ephemeral generations with compaction")); should_expand = TRUE; } } } #endif //USE_REGIONS #ifdef HOST_64BIT BOOL high_memory = FALSE; #endif // HOST_64BIT if (!should_compact) { // We are not putting this in dt_high_frag_p because it's not exactly // high fragmentation - it's just enough planned fragmentation for us to // want to compact. Also the "fragmentation" we are talking about here // is different from anywhere else. dprintf (REGIONS_LOG, ("frag: %Id, fragmentation_burden: %.3f", fragmentation, fragmentation_burden)); BOOL frag_exceeded = ((fragmentation >= dd_fragmentation_limit (dd)) && (fragmentation_burden >= dd_fragmentation_burden_limit (dd))); if (frag_exceeded) { #ifdef BACKGROUND_GC // do not force compaction if this was a stress-induced GC IN_STRESS_HEAP(if (!settings.stress_induced)) { #endif // BACKGROUND_GC assert (settings.concurrent == FALSE); should_compact = TRUE; get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_high_frag); #ifdef BACKGROUND_GC } #endif // BACKGROUND_GC } #ifdef HOST_64BIT // check for high memory situation if(!should_compact) { uint32_t num_heaps = 1; #ifdef MULTIPLE_HEAPS num_heaps = gc_heap::n_heaps; #endif // MULTIPLE_HEAPS ptrdiff_t reclaim_space = generation_size(max_generation) - generation_plan_size(max_generation); if((settings.entry_memory_load >= high_memory_load_th) && (settings.entry_memory_load < v_high_memory_load_th)) { if(reclaim_space > (int64_t)(min_high_fragmentation_threshold (entry_available_physical_mem, num_heaps))) { dprintf(GTC_LOG,("compacting due to fragmentation in high memory")); should_compact = TRUE; get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_high_mem_frag); } high_memory = TRUE; } else if(settings.entry_memory_load >= v_high_memory_load_th) { if(reclaim_space > (ptrdiff_t)(min_reclaim_fragmentation_threshold (num_heaps))) { dprintf(GTC_LOG,("compacting due to fragmentation in very high memory")); should_compact = TRUE; get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_vhigh_mem_frag); } high_memory = TRUE; } } #endif // HOST_64BIT } // The purpose of calling ensure_gap_allocation here is to make sure // that we actually are able to commit the memory to allocate generation // starts. if ((should_compact == FALSE) && (ensure_gap_allocation (condemned_gen_number) == FALSE)) { should_compact = TRUE; get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_no_gaps); } if (settings.condemned_generation == max_generation) { //check the progress if ( #ifdef HOST_64BIT (high_memory && !should_compact) || #endif // HOST_64BIT !is_full_compacting_gc_productive()) { //no progress -> lock settings.should_lock_elevation = TRUE; } } if (settings.pause_mode == pause_no_gc) { should_compact = TRUE; if ((size_t)(heap_segment_reserved (ephemeral_heap_segment) - heap_segment_plan_allocated (ephemeral_heap_segment)) < soh_allocation_no_gc) { should_expand = TRUE; } } dprintf (2, ("will %s(%s)", (should_compact ? "compact" : "sweep"), (should_expand ? 
"ex" : ""))); return should_compact; } size_t align_lower_good_size_allocation (size_t size) { return (size/64)*64; } size_t gc_heap::approximate_new_allocation() { dynamic_data* dd0 = dynamic_data_of (0); return max (2*dd_min_size (dd0), ((dd_desired_allocation (dd0)*2)/3)); } bool gc_heap::check_against_hard_limit (size_t space_required) { bool can_fit = TRUE; // If hard limit is specified, and if we attributed all that's left in commit to the ephemeral seg // so we treat that as segment end, do we have enough space. if (heap_hard_limit) { size_t left_in_commit = heap_hard_limit - current_total_committed; int num_heaps = get_num_heaps(); left_in_commit /= num_heaps; if (left_in_commit < space_required) { can_fit = FALSE; } dprintf (2, ("h%d end seg %Id, but only %Id left in HARD LIMIT commit, required: %Id %s on eph", heap_number, space_required, left_in_commit, space_required, (can_fit ? "ok" : "short"))); } return can_fit; } #ifdef USE_REGIONS bool gc_heap::sufficient_space_regions (size_t end_space, size_t end_space_required) { // REGIONS PERF TODO: we can repurpose large regions here too, if needed. size_t free_regions_space = (free_regions[basic_free_region].get_num_free_regions() * ((size_t)1 << min_segment_size_shr)) + global_region_allocator.get_free(); size_t total_alloc_space = end_space + free_regions_space; dprintf (REGIONS_LOG, ("h%d required %Id, end %Id + free %Id=%Id", heap_number, end_space_required, end_space, free_regions_space, total_alloc_space)); if (total_alloc_space > end_space_required) { return check_against_hard_limit (end_space_required); } else return false; } #else //USE_REGIONS BOOL gc_heap::sufficient_space_end_seg (uint8_t* start, uint8_t* committed, uint8_t* reserved, size_t end_space_required) { BOOL can_fit = FALSE; size_t committed_space = (size_t)(committed - start); size_t end_seg_space = (size_t)(reserved - start); if (committed_space > end_space_required) { return true; } else if (end_seg_space > end_space_required) { return check_against_hard_limit (end_space_required - committed_space); } else return false; } #endif //USE_REGIONS // After we did a GC we expect to have at least this // much space at the end of the segment to satisfy // a reasonable amount of allocation requests. size_t gc_heap::end_space_after_gc() { return max ((dd_min_size (dynamic_data_of (0))/2), (END_SPACE_AFTER_GC_FL)); } BOOL gc_heap::ephemeral_gen_fit_p (gc_tuning_point tp) { uint8_t* start = 0; #ifdef USE_REGIONS assert ((tp == tuning_deciding_condemned_gen) || (tp == tuning_deciding_full_gc)); #else//USE_REGIONS if ((tp == tuning_deciding_condemned_gen) || (tp == tuning_deciding_compaction)) { start = (settings.concurrent ? 
alloc_allocated : heap_segment_allocated (ephemeral_heap_segment)); if (settings.concurrent) { dprintf (2, ("%Id left at the end of ephemeral segment (alloc_allocated)", (size_t)(heap_segment_reserved (ephemeral_heap_segment) - alloc_allocated))); } else { dprintf (2, ("%Id left at the end of ephemeral segment (allocated)", (size_t)(heap_segment_reserved (ephemeral_heap_segment) - heap_segment_allocated (ephemeral_heap_segment)))); } } else if (tp == tuning_deciding_expansion) { start = heap_segment_plan_allocated (ephemeral_heap_segment); dprintf (2, ("%Id left at the end of ephemeral segment based on plan", (size_t)(heap_segment_reserved (ephemeral_heap_segment) - start))); } else { assert (tp == tuning_deciding_full_gc); dprintf (2, ("FGC: %Id left at the end of ephemeral segment (alloc_allocated)", (size_t)(heap_segment_reserved (ephemeral_heap_segment) - alloc_allocated))); start = alloc_allocated; } if (start == 0) // empty ephemeral generations { assert (tp == tuning_deciding_expansion); // if there are no survivors in the ephemeral segment, // this should be the beginning of ephemeral segment. start = generation_allocation_pointer (generation_of (max_generation)); assert (start == heap_segment_mem (ephemeral_heap_segment)); } if (tp == tuning_deciding_expansion) { assert (settings.condemned_generation >= (max_generation-1)); size_t gen0size = approximate_new_allocation(); size_t eph_size = gen0size; size_t gen_min_sizes = 0; for (int j = 1; j <= max_generation-1; j++) { gen_min_sizes += 2*dd_min_size (dynamic_data_of(j)); } eph_size += gen_min_sizes; dprintf (3, ("h%d deciding on expansion, need %Id (gen0: %Id, 2*min: %Id)", heap_number, gen0size, gen_min_sizes, eph_size)); // We must find room for one large object and enough room for gen0size if ((size_t)(heap_segment_reserved (ephemeral_heap_segment) - start) > eph_size) { dprintf (3, ("Enough room before end of segment")); return TRUE; } else { size_t room = align_lower_good_size_allocation (heap_segment_reserved (ephemeral_heap_segment) - start); size_t end_seg = room; //look at the plug free space size_t largest_alloc = END_SPACE_AFTER_GC_FL; bool large_chunk_found = FALSE; size_t bos = 0; uint8_t* gen0start = generation_plan_allocation_start (youngest_generation); dprintf (3, ("ephemeral_gen_fit_p: gen0 plan start: %Ix", (size_t)gen0start)); if (gen0start == 0) return FALSE; dprintf (3, ("ephemeral_gen_fit_p: room before free list search %Id, needed: %Id", room, gen0size)); while ((bos < mark_stack_bos) && !((room >= gen0size) && large_chunk_found)) { uint8_t* plug = pinned_plug (pinned_plug_of (bos)); if (in_range_for_segment (plug, ephemeral_heap_segment)) { if (plug >= gen0start) { size_t chunk = align_lower_good_size_allocation (pinned_len (pinned_plug_of (bos))); room += chunk; if (!large_chunk_found) { large_chunk_found = (chunk >= largest_alloc); } dprintf (3, ("ephemeral_gen_fit_p: room now %Id, large chunk: %Id", room, large_chunk_found)); } } bos++; } if (room >= gen0size) { if (large_chunk_found) { sufficient_gen0_space_p = TRUE; dprintf (3, ("Enough room")); return TRUE; } else { // now we need to find largest_alloc at the end of the segment. 
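// Note: end_seg was captured before the pinned plug gaps were added to room, so it only counts the space at the physical end of the segment, which is where a large allocation would have to fit since no large enough gap was found among the plugs.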
if (end_seg >= end_space_after_gc()) { dprintf (3, ("Enough room (may need end of seg)")); return TRUE; } } } dprintf (3, ("Not enough room")); return FALSE; } } else #endif //USE_REGIONS { size_t end_space = 0; dynamic_data* dd = dynamic_data_of (0); if ((tp == tuning_deciding_condemned_gen) || (tp == tuning_deciding_full_gc)) { end_space = max (2*dd_min_size (dd), end_space_after_gc()); } else { assert (tp == tuning_deciding_compaction); end_space = approximate_new_allocation(); } #ifdef USE_REGIONS size_t gen0_end_space = get_gen0_end_space(); BOOL can_fit = sufficient_space_regions (gen0_end_space, end_space); #else //USE_REGIONS BOOL can_fit = sufficient_space_end_seg (start, heap_segment_committed (ephemeral_heap_segment), heap_segment_reserved (ephemeral_heap_segment), end_space); #endif //USE_REGIONS return can_fit; } } CObjectHeader* gc_heap::allocate_uoh_object (size_t jsize, uint32_t flags, int gen_number, int64_t& alloc_bytes) { //create a new alloc context because gen3context is shared. alloc_context acontext; acontext.init(); #if HOST_64BIT size_t maxObjectSize = (INT64_MAX - 7 - Align(min_obj_size)); #else size_t maxObjectSize = (INT32_MAX - 7 - Align(min_obj_size)); #endif if (jsize >= maxObjectSize) { if (GCConfig::GetBreakOnOOM()) { GCToOSInterface::DebugBreak(); } return NULL; } size_t size = AlignQword (jsize); int align_const = get_alignment_constant (FALSE); size_t pad = 0; #ifdef FEATURE_LOH_COMPACTION if (gen_number == loh_generation) { pad = Align (loh_padding_obj_size, align_const); } #endif //FEATURE_LOH_COMPACTION assert (size >= Align (min_obj_size, align_const)); #ifdef _MSC_VER #pragma inline_depth(0) #endif //_MSC_VER if (! allocate_more_space (&acontext, (size + pad), flags, gen_number)) { return 0; } #ifdef _MSC_VER #pragma inline_depth(20) #endif //_MSC_VER #ifdef FEATURE_LOH_COMPACTION // The GC allocator made a free object already in this alloc context and // adjusted the alloc_ptr accordingly. #endif //FEATURE_LOH_COMPACTION uint8_t* result = acontext.alloc_ptr; assert ((size_t)(acontext.alloc_limit - acontext.alloc_ptr) == size); alloc_bytes += size; CObjectHeader* obj = (CObjectHeader*)result; #ifdef BACKGROUND_GC if (gc_heap::background_running_p()) { uint8_t* current_lowest_address = background_saved_lowest_address; uint8_t* current_highest_address = background_saved_highest_address; if ((result < current_highest_address) && (result >= current_lowest_address)) { dprintf (3, ("Clearing mark bit at address %Ix", (size_t)(&mark_array [mark_word_of (result)]))); mark_array_clear_marked (result); } if (current_c_gc_state != c_gc_state_free) { dprintf (3, ("Concurrent allocation of a large object %Ix", (size_t)obj)); //mark the new block specially so we know it is a new object if ((result < current_highest_address) && (result >= current_lowest_address)) { #ifdef DOUBLY_LINKED_FL heap_segment* seg = seg_mapping_table_segment_of (result); // if bgc_allocated is 0 it means it was allocated during bgc sweep, // and since sweep does not look at this seg we cannot set the mark array bit. 
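// Skipping the mark bit should be harmless in that case: sweep never visits such a segment, so the object cannot be reclaimed by this BGC.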
uint8_t* background_allocated = heap_segment_background_allocated(seg); if (background_allocated != 0) #endif //DOUBLY_LINKED_FL { dprintf(3, ("Setting mark bit at address %Ix", (size_t)(&mark_array[mark_word_of(result)]))); mark_array_set_marked(result); } } } } #endif //BACKGROUND_GC assert (obj != 0); assert ((size_t)obj == Align ((size_t)obj, align_const)); return obj; } void reset_memory (uint8_t* o, size_t sizeo) { if (gc_heap::use_large_pages_p) return; if (sizeo > 128 * 1024) { // We cannot reset the memory for the useful part of a free object. size_t size_to_skip = min_free_list - plug_skew; size_t page_start = align_on_page ((size_t)(o + size_to_skip)); size_t size = align_lower_page ((size_t)o + sizeo - size_to_skip - plug_skew) - page_start; // Note we need to compensate for an OS bug here. This bug would cause the MEM_RESET to fail // on write watched memory. if (reset_mm_p && gc_heap::dt_high_memory_load_p()) { #ifdef MULTIPLE_HEAPS bool unlock_p = true; #else // We don't do unlock because there could be many processes using workstation GC and it's // bad perf to have many threads doing unlock at the same time. bool unlock_p = false; #endif //MULTIPLE_HEAPS reset_mm_p = GCToOSInterface::VirtualReset((void*)page_start, size, unlock_p); } } } BOOL gc_heap::uoh_object_marked (uint8_t* o, BOOL clearp) { BOOL m = FALSE; // It shouldn't be necessary to do these comparisons because this is only used for blocking // GCs and LOH segments cannot be out of range. if ((o >= lowest_address) && (o < highest_address)) { if (marked (o)) { if (clearp) { clear_marked (o); if (pinned (o)) clear_pinned(o); } m = TRUE; } else m = FALSE; } else m = TRUE; return m; } void gc_heap::walk_survivors_relocation (void* profiling_context, record_surv_fn fn) { // Now walk the portion of memory that is actually being relocated. walk_relocation (profiling_context, fn); #ifdef FEATURE_LOH_COMPACTION if (loh_compacted_p) { walk_relocation_for_loh (profiling_context, fn); } #endif //FEATURE_LOH_COMPACTION } void gc_heap::walk_survivors_for_uoh (void* profiling_context, record_surv_fn fn, int gen_number) { generation* gen = generation_of (gen_number); heap_segment* seg = heap_segment_rw (generation_start_segment (gen));; PREFIX_ASSUME(seg != NULL); uint8_t* o = get_uoh_start_object (seg, gen); uint8_t* plug_end = o; uint8_t* plug_start = o; while (1) { if (o >= heap_segment_allocated (seg)) { seg = heap_segment_next (seg); if (seg == 0) break; else o = heap_segment_mem (seg); } if (uoh_object_marked(o, FALSE)) { plug_start = o; BOOL m = TRUE; while (m) { o = o + AlignQword (size (o)); if (o >= heap_segment_allocated (seg)) { break; } m = uoh_object_marked (o, FALSE); } plug_end = o; fn (plug_start, plug_end, 0, profiling_context, false, false); } else { while (o < heap_segment_allocated (seg) && !uoh_object_marked(o, FALSE)) { o = o + AlignQword (size (o)); } } } } #ifdef BACKGROUND_GC BOOL gc_heap::background_object_marked (uint8_t* o, BOOL clearp) { BOOL m = FALSE; if ((o >= background_saved_lowest_address) && (o < background_saved_highest_address)) { if (mark_array_marked (o)) { if (clearp) { mark_array_clear_marked (o); //dprintf (3, ("mark array bit for object %Ix is cleared", o)); dprintf (3, ("CM: %Ix", o)); } m = TRUE; } else m = FALSE; } else m = TRUE; dprintf (3, ("o %Ix(%d) %s", o, size(o), (m ? 
"was bm" : "was NOT bm"))); return m; } void gc_heap::background_delay_delete_uoh_segments() { for (int i = uoh_start_generation; i < total_generation_count; i++) { generation* gen = generation_of (i); heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); heap_segment* prev_seg = 0; #ifdef USE_REGIONS heap_segment* first_remaining_region = 0; #endif //USE_REGIONS while (seg) { heap_segment* next_seg = heap_segment_next (seg); if (seg->flags & heap_segment_flags_uoh_delete) { dprintf (3, ("deleting %Ix-%Ix-%Ix", (size_t)seg, heap_segment_allocated (seg), heap_segment_reserved (seg))); delete_heap_segment (seg, (GCConfig::GetRetainVM() != 0)); heap_segment_next (prev_seg) = next_seg; #ifdef USE_REGIONS update_start_tail_regions (gen, seg, prev_seg, next_seg); #endif //USE_REGIONS } else { #ifdef USE_REGIONS if (!first_remaining_region) first_remaining_region = seg; #endif //USE_REGIONS prev_seg = seg; } seg = next_seg; } #ifdef USE_REGIONS assert (heap_segment_rw (generation_start_segment (gen)) == generation_start_segment (gen)); if (generation_start_segment (gen) != first_remaining_region) { dprintf (REGIONS_LOG, ("h%d gen%d start %Ix -> %Ix", heap_number, gen->gen_num, heap_segment_mem (generation_start_segment (gen)), heap_segment_mem (first_remaining_region))); generation_start_segment (gen) = first_remaining_region; } if (generation_tail_region (gen) != prev_seg) { dprintf (REGIONS_LOG, ("h%d gen%d start %Ix -> %Ix", heap_number, gen->gen_num, heap_segment_mem (generation_tail_region (gen)), heap_segment_mem (prev_seg))); generation_tail_region (gen) = prev_seg; } #endif //USE_REGIONS } } uint8_t* gc_heap::background_next_end (heap_segment* seg, BOOL uoh_objects_p) { return (uoh_objects_p ? heap_segment_allocated (seg) : heap_segment_background_allocated (seg)); } void gc_heap::set_mem_verify (uint8_t* start, uint8_t* end, uint8_t b) { #ifdef VERIFY_HEAP if (end > start) { if ((GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) && !(GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_NO_MEM_FILL)) { dprintf (3, ("setting mem to %c [%Ix, [%Ix", b, start, end)); memset (start, b, (end - start)); } } #endif //VERIFY_HEAP } void gc_heap::generation_delete_heap_segment (generation* gen, heap_segment* seg, heap_segment* prev_seg, heap_segment* next_seg) { dprintf (3, ("bgc sweep: deleting seg %Ix(%Ix), next %Ix(%Ix), prev %Ix(%Ix)", (size_t)seg, heap_segment_mem (seg), (size_t)next_seg, (next_seg ? heap_segment_mem (next_seg) : 0), (size_t)prev_seg, (prev_seg ? heap_segment_mem (prev_seg) : 0))); if (gen->gen_num > max_generation) { dprintf (3, ("Preparing empty large segment %Ix for deletion", (size_t)seg)); // We cannot thread segs in here onto freeable_uoh_segment because // grow_brick_card_tables could be committing mark array which needs to read // the seg list. So we delay it till next time we suspend EE. seg->flags |= heap_segment_flags_uoh_delete; // Since we will be decommitting the seg, we need to prevent heap verification // to verify this segment. 
heap_segment_allocated (seg) = heap_segment_mem (seg); } else { assert (seg != ephemeral_heap_segment); #ifdef DOUBLY_LINKED_FL // For doubly linked list we go forward for SOH heap_segment_next (prev_seg) = next_seg; #else //DOUBLY_LINKED_FL heap_segment_next (next_seg) = prev_seg; #endif //DOUBLY_LINKED_FL dprintf (3, ("Preparing empty small segment %Ix for deletion", (size_t)seg)); heap_segment_next (seg) = freeable_soh_segment; freeable_soh_segment = seg; #ifdef USE_REGIONS #ifdef DOUBLY_LINKED_FL heap_segment* next_region = next_seg; heap_segment* prev_region = prev_seg; #else //DOUBLY_LINKED_FL heap_segment* next_region = prev_seg; heap_segment* prev_region = next_seg; #endif //DOUBLY_LINKED_FL update_start_tail_regions (gen, seg, prev_region, next_region); #endif //USE_REGIONS } decommit_heap_segment (seg); seg->flags |= heap_segment_flags_decommitted; set_mem_verify (heap_segment_allocated (seg) - plug_skew, heap_segment_used (seg), 0xbb); } void gc_heap::process_background_segment_end (heap_segment* seg, generation* gen, uint8_t* last_plug_end, heap_segment* start_seg, BOOL* delete_p, size_t free_obj_size_last_gap) { *delete_p = FALSE; uint8_t* allocated = heap_segment_allocated (seg); uint8_t* background_allocated = heap_segment_background_allocated (seg); BOOL uoh_p = heap_segment_uoh_p (seg); dprintf (3, ("EoS [%Ix, %Ix[(%Ix[), last: %Ix(%Id)", (size_t)heap_segment_mem (seg), background_allocated, allocated, last_plug_end, free_obj_size_last_gap)); if (!uoh_p && (allocated != background_allocated)) { assert (gen->gen_num <= max_generation); dprintf (3, ("Make a free object before newly promoted objects [%Ix, %Ix[", (size_t)last_plug_end, background_allocated)); size_t last_gap = background_allocated - last_plug_end; if (last_gap > 0) { thread_gap (last_plug_end, last_gap, generation_of (max_generation)); add_gen_free (max_generation, last_gap); fix_brick_to_highest (last_plug_end, background_allocated); // When we allowed fgc's during going through gaps, we could have erased the brick // that corresponds to bgc_allocated 'cause we had to update the brick there, // recover it here. fix_brick_to_highest (background_allocated, background_allocated); } } else { // by default, if allocated == background_allocated, it can't // be the ephemeral segment. if (seg == ephemeral_heap_segment) { FATAL_GC_ERROR(); } #ifndef USE_REGIONS if (allocated == heap_segment_mem (seg)) { // this can happen with UOH segments when multiple threads // allocate new segments and not all of them were needed to // satisfy allocation requests. assert (gen->gen_num > max_generation); } #endif //!USE_REGIONS if (last_plug_end == heap_segment_mem (seg)) { // REGIONS TODO: start_seg doesn't matter for regions. We can get rid of it too. // Just need to update the start segment accordingly in generation_delete_heap_segment. // Also this might leave us with no regions at all for gen2 and we should be prepared // for that. One approach is to ensure at least one region per generation at the beginning // of a GC. if (seg != start_seg) { *delete_p = TRUE; } dprintf (3, ("h%d seg %Ix %s be deleted", heap_number, heap_segment_mem (seg), (*delete_p ? 
"should" : "should not"))); } if (!*delete_p) { dprintf (3, ("[h%d] seg %Ix alloc %Ix->%Ix", heap_number, (size_t)seg, heap_segment_allocated (seg), (size_t)last_plug_end)); heap_segment_allocated (seg) = last_plug_end; set_mem_verify (heap_segment_allocated (seg) - plug_skew, heap_segment_used (seg), 0xbb); decommit_heap_segment_pages (seg, 0); } } if (free_obj_size_last_gap) { generation_free_obj_space (gen) -= free_obj_size_last_gap; dprintf (2, ("[h%d] PS: gen2FO-: %Id->%Id", heap_number, free_obj_size_last_gap, generation_free_obj_space (gen))); } dprintf (3, ("verifying seg %Ix's mark array was completely cleared", seg)); bgc_verify_mark_array_cleared (seg); } inline BOOL gc_heap::fgc_should_consider_object (uint8_t* o, heap_segment* seg, BOOL consider_bgc_mark_p, BOOL check_current_sweep_p, BOOL check_saved_sweep_p) { #ifdef USE_REGIONS assert (!check_saved_sweep_p); #endif //USE_REGIONS // the logic for this function must be kept in sync with the analogous function // in ToolBox\SOS\Strike\gc.cpp // TRUE means we don't need to check the bgc mark bit // FALSE means we do. BOOL no_bgc_mark_p = FALSE; if (consider_bgc_mark_p) { if (check_current_sweep_p && (o < current_sweep_pos)) { dprintf (3, ("no bgc mark - o: %Ix < cs: %Ix", o, current_sweep_pos)); no_bgc_mark_p = TRUE; } if (!no_bgc_mark_p) { #ifndef USE_REGIONS if(check_saved_sweep_p && (o >= saved_sweep_ephemeral_start)) { dprintf (3, ("no bgc mark - o: %Ix >= ss: %Ix", o, saved_sweep_ephemeral_start)); no_bgc_mark_p = TRUE; } #endif //!USE_REGIONS if (!check_saved_sweep_p) { uint8_t* background_allocated = heap_segment_background_allocated (seg); #ifndef USE_REGIONS // if this was the saved ephemeral segment, check_saved_sweep_p // would've been true. assert (heap_segment_background_allocated (seg) != saved_sweep_ephemeral_start); #endif //!USE_REGIONS // background_allocated could be 0 for the new segments acquired during bgc // sweep and we still want no_bgc_mark_p to be true. if (o >= background_allocated) { dprintf (3, ("no bgc mark - o: %Ix >= ba: %Ix", o, background_allocated)); no_bgc_mark_p = TRUE; } } } } else { no_bgc_mark_p = TRUE; } dprintf (3, ("bgc mark %Ix: %s (bm: %s)", o, (no_bgc_mark_p ? "no" : "yes"), ((no_bgc_mark_p || background_object_marked (o, FALSE)) ? "yes" : "no"))); return (no_bgc_mark_p ? TRUE : background_object_marked (o, FALSE)); } // consider_bgc_mark_p tells you if you need to care about the bgc mark bit at all // if it's TRUE, check_current_sweep_p tells you if you should consider the // current sweep position or not. void gc_heap::should_check_bgc_mark (heap_segment* seg, BOOL* consider_bgc_mark_p, BOOL* check_current_sweep_p, BOOL* check_saved_sweep_p) { // the logic for this function must be kept in sync with the analogous function // in ToolBox\SOS\Strike\gc.cpp *consider_bgc_mark_p = FALSE; *check_current_sweep_p = FALSE; *check_saved_sweep_p = FALSE; if (current_c_gc_state == c_gc_state_planning) { // We are doing the current_sweep_pos comparison here because we have yet to // turn on the swept flag for the segment but in_range_for_segment will return // FALSE if the address is the same as reserved. 
if ((seg->flags & heap_segment_flags_swept) || (current_sweep_pos == heap_segment_reserved (seg))) { dprintf (3, ("seg %Ix is already swept by bgc", seg)); } else if (heap_segment_background_allocated (seg) == 0) { dprintf (3, ("seg %Ix newly alloc during bgc", seg)); } else { *consider_bgc_mark_p = TRUE; dprintf (3, ("seg %Ix hasn't been swept by bgc", seg)); #ifndef USE_REGIONS if (seg == saved_sweep_ephemeral_seg) { dprintf (3, ("seg %Ix is the saved ephemeral seg", seg)); *check_saved_sweep_p = TRUE; } #endif //!USE_REGIONS if (in_range_for_segment (current_sweep_pos, seg)) { dprintf (3, ("current sweep pos is %Ix and within seg %Ix", current_sweep_pos, seg)); *check_current_sweep_p = TRUE; } } } } // REGIONS TODO: I'm not releasing any empty ephemeral regions here because the gen0 allocator is // iterating over these regions. We'd want to do the same as what we do with LOH segs/regions. void gc_heap::background_ephemeral_sweep() { dprintf (3, ("bgc ephemeral sweep")); int align_const = get_alignment_constant (TRUE); #ifndef USE_REGIONS saved_sweep_ephemeral_seg = ephemeral_heap_segment; saved_sweep_ephemeral_start = generation_allocation_start (generation_of (max_generation - 1)); #endif //!USE_REGIONS // Since we don't want to interfere with gen0 allocation while we are threading gen0 free list, // we thread onto a list first then publish it when we are done. allocator youngest_free_list; size_t youngest_free_list_space = 0; size_t youngest_free_obj_space = 0; youngest_free_list.clear(); for (int i = 0; i <= (max_generation - 1); i++) { generation* gen_to_reset = generation_of (i); assert (generation_free_list_space (gen_to_reset) == 0); // Can only assert free_list_space is 0, not free_obj_space as the allocator could have added // something there. } for (int i = (max_generation - 1); i >= 0; i--) { generation* current_gen = generation_of (i); #ifdef USE_REGIONS heap_segment* ephemeral_region = heap_segment_rw (generation_start_segment (current_gen)); while (ephemeral_region) #endif //USE_REGIONS { #ifdef USE_REGIONS uint8_t* o = heap_segment_mem (ephemeral_region); uint8_t* end = heap_segment_background_allocated (ephemeral_region); dprintf (3, ("bgc eph: gen%d seg %Ix(%Ix-%Ix)", heap_segment_gen_num (ephemeral_region), heap_segment_mem (ephemeral_region), heap_segment_allocated (ephemeral_region), heap_segment_background_allocated (ephemeral_region))); // This doesn't conflict with the allocator getting a new region in gen0. // If the allocator just threaded a region onto the gen0 region list we will // read that region and detect that its background allocated is 0. if (!end) { ephemeral_region->flags |= heap_segment_flags_swept; ephemeral_region = heap_segment_next (ephemeral_region); continue; } #else //USE_REGIONS uint8_t* o = generation_allocation_start (current_gen); //Skip the generation gap object o = o + Align(size (o), align_const); uint8_t* end = ((i > 0) ? 
generation_allocation_start (generation_of (i - 1)) : heap_segment_allocated (ephemeral_heap_segment)); #endif //USE_REGIONS uint8_t* plug_end = o; uint8_t* plug_start = o; BOOL marked_p = FALSE; while (o < end) { marked_p = background_object_marked (o, TRUE); if (marked_p) { plug_start = o; size_t plug_size = plug_start - plug_end; if (i >= 1) { thread_gap (plug_end, plug_size, current_gen); } else { if (plug_size > 0) { make_unused_array (plug_end, plug_size); if (plug_size >= min_free_list) { youngest_free_list_space += plug_size; youngest_free_list.thread_item (plug_end, plug_size); } else { youngest_free_obj_space += plug_size; } } } fix_brick_to_highest (plug_end, plug_start); fix_brick_to_highest (plug_start, plug_start); BOOL m = TRUE; while (m) { o = o + Align (size (o), align_const); if (o >= end) { break; } m = background_object_marked (o, TRUE); } plug_end = o; dprintf (3, ("bgs: plug [%Ix, %Ix[", (size_t)plug_start, (size_t)plug_end)); } else { while ((o < end) && !background_object_marked (o, FALSE)) { o = o + Align (size (o), align_const); } } } if (plug_end != end) { if (i >= 1) { thread_gap (plug_end, end - plug_end, current_gen); } else { #ifndef USE_REGIONS heap_segment_allocated (ephemeral_heap_segment) = plug_end; heap_segment_saved_bg_allocated (ephemeral_heap_segment) = plug_end; #endif //!USE_REGIONS make_unused_array (plug_end, (end - plug_end)); } fix_brick_to_highest (plug_end, end); } #ifdef USE_REGIONS ephemeral_region->flags |= heap_segment_flags_swept; // Setting this to 0 so background_sweep can terminate for SOH. heap_segment_background_allocated (ephemeral_region) = 0; ephemeral_region = heap_segment_next (ephemeral_region); #endif //USE_REGIONS } dd_fragmentation (dynamic_data_of (i)) = generation_free_list_space (current_gen) + generation_free_obj_space (current_gen); } generation* youngest_gen = generation_of (0); generation_free_list_space (youngest_gen) = youngest_free_list_space; generation_free_obj_space (youngest_gen) = youngest_free_obj_space; dd_fragmentation (dynamic_data_of (0)) = youngest_free_list_space + youngest_free_obj_space; generation_allocator (youngest_gen)->copy_with_no_repair (&youngest_free_list); } void gc_heap::background_sweep() { //concurrent_print_time_delta ("finished with mark and start with sweep"); concurrent_print_time_delta ("Sw"); dprintf (2, ("---- (GC%d)Background Sweep Phase ----", VolatileLoad(&settings.gc_index))); //block concurrent allocation for large objects dprintf (3, ("lh state: planning")); for (int i = 0; i <= max_generation; i++) { generation* gen_to_reset = generation_of (i); #ifdef DOUBLY_LINKED_FL if (i == max_generation) { dprintf (2, ("h%d: gen2 still has FL: %Id, FO: %Id", heap_number, generation_free_list_space (gen_to_reset), generation_free_obj_space (gen_to_reset))); } else #endif //DOUBLY_LINKED_FL { generation_allocator (gen_to_reset)->clear(); generation_free_list_space (gen_to_reset) = 0; generation_free_obj_space (gen_to_reset) = 0; } generation_free_list_allocated (gen_to_reset) = 0; generation_end_seg_allocated (gen_to_reset) = 0; generation_condemned_allocated (gen_to_reset) = 0; generation_sweep_allocated (gen_to_reset) = 0; //reset the allocation so foreground gc can allocate into older generation generation_allocation_pointer (gen_to_reset)= 0; generation_allocation_limit (gen_to_reset) = 0; generation_allocation_segment (gen_to_reset) = heap_segment_rw (generation_start_segment (gen_to_reset)); } FIRE_EVENT(BGC2ndNonConEnd); uoh_alloc_thread_count = 0; init_free_and_plug(); 
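// Record that we are now in the SOH portion of the background sweep.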
current_bgc_state = bgc_sweep_soh; verify_soh_segment_list(); #ifdef DOUBLY_LINKED_FL // set the initial segment and position so that foreground GC knows where BGC is with the sweep current_sweep_seg = heap_segment_rw (generation_start_segment (generation_of (max_generation))); current_sweep_pos = 0; #endif //DOUBLY_LINKED_FL #ifdef FEATURE_BASICFREEZE generation* max_gen = generation_of (max_generation); if ((generation_start_segment (max_gen) != ephemeral_heap_segment) && ro_segments_in_range) { sweep_ro_segments (generation_start_segment (max_gen)); } #endif // FEATURE_BASICFREEZE if (current_c_gc_state != c_gc_state_planning) { current_c_gc_state = c_gc_state_planning; } concurrent_print_time_delta ("Swe"); for (int i = uoh_start_generation; i < total_generation_count; i++) { heap_segment* uoh_seg = heap_segment_rw (generation_start_segment (generation_of (i))); PREFIX_ASSUME(uoh_seg != NULL); while (uoh_seg) { uoh_seg->flags &= ~heap_segment_flags_swept; heap_segment_background_allocated (uoh_seg) = heap_segment_allocated (uoh_seg); uoh_seg = heap_segment_next_rw (uoh_seg); } } #ifdef MULTIPLE_HEAPS bgc_t_join.join(this, gc_join_restart_ee); if (bgc_t_join.joined()) { dprintf(2, ("Starting BGC threads for resuming EE")); bgc_t_join.restart(); } #endif //MULTIPLE_HEAPS if (heap_number == 0) { #ifdef BGC_SERVO_TUNING get_and_reset_loh_alloc_info(); #endif //BGC_SERVO_TUNING uint64_t suspended_end_ts = GetHighPrecisionTimeStamp(); last_bgc_info[last_bgc_info_index].pause_durations[1] = (size_t)(suspended_end_ts - suspended_start_time); total_suspended_time += last_bgc_info[last_bgc_info_index].pause_durations[1]; restart_EE (); } FIRE_EVENT(BGC2ndConBegin); background_ephemeral_sweep(); concurrent_print_time_delta ("Swe eph"); #ifdef MULTIPLE_HEAPS bgc_t_join.join(this, gc_join_after_ephemeral_sweep); if (bgc_t_join.joined()) #endif //MULTIPLE_HEAPS { #ifdef FEATURE_EVENT_TRACE bgc_heap_walk_for_etw_p = GCEventStatus::IsEnabled(GCEventProvider_Default, GCEventKeyword_GCHeapSurvivalAndMovement, GCEventLevel_Information); #endif //FEATURE_EVENT_TRACE leave_spin_lock (&gc_lock); #ifdef MULTIPLE_HEAPS dprintf(2, ("Starting BGC threads for BGC sweeping")); bgc_t_join.restart(); #endif //MULTIPLE_HEAPS } disable_preemptive (true); dynamic_data* dd = dynamic_data_of (max_generation); const int num_objs = 256; int current_num_objs = 0; for (int i = max_generation; i < total_generation_count; i++) { generation* gen = generation_of (i); heap_segment* gen_start_seg = heap_segment_rw (generation_start_segment(gen)); heap_segment* next_seg = 0; heap_segment* prev_seg; heap_segment* start_seg; int align_const = get_alignment_constant (i == max_generation); #ifndef DOUBLY_LINKED_FL if (i == max_generation) { #ifdef USE_REGIONS start_seg = generation_tail_region (gen); #else // start with saved ephemeral segment // we are no longer holding gc_lock, so a new ephemeral segment could be added, we want the saved one. start_seg = saved_sweep_ephemeral_seg; #endif //USE_REGIONS prev_seg = heap_segment_next(start_seg); } else #endif //!DOUBLY_LINKED_FL { // If we use doubly linked FL we don't need to go backwards as we are maintaining the free list. 
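// This path (UOH, or SOH when DOUBLY_LINKED_FL is defined) starts from the first segment of the generation and walks forward.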
start_seg = gen_start_seg; prev_seg = NULL; if (i > max_generation) { // UOH allocations are allowed while sweeping SOH, so // we defer clearing UOH free lists until we start sweeping them generation_allocator (gen)->clear(); generation_free_list_space (gen) = 0; generation_free_obj_space (gen) = 0; generation_free_list_allocated (gen) = 0; generation_end_seg_allocated (gen) = 0; generation_condemned_allocated (gen) = 0; generation_sweep_allocated (gen) = 0; generation_allocation_pointer (gen)= 0; generation_allocation_limit (gen) = 0; generation_allocation_segment (gen) = heap_segment_rw (generation_start_segment (gen)); } else { dprintf (3333, ("h%d: SOH sweep start on seg %Ix: total FL: %Id, FO: %Id", heap_number, (size_t)start_seg, generation_free_list_space (gen), generation_free_obj_space (gen))); } } PREFIX_ASSUME(start_seg != NULL); heap_segment* seg = start_seg; dprintf (2, ("bgs: sweeping gen %Ix seg %Ix->%Ix(%Ix)", gen->gen_num, heap_segment_mem (seg), heap_segment_allocated (seg), heap_segment_background_allocated (seg))); while (seg #ifdef DOUBLY_LINKED_FL // We no longer go backwards in segment list for SOH so we need to bail when we see // segments newly allocated during bgc sweep. && !((heap_segment_background_allocated (seg) == 0) && (gen != large_object_generation)) #endif //DOUBLY_LINKED_FL ) { uint8_t* o = heap_segment_mem (seg); if (seg == gen_start_seg) { #ifndef USE_REGIONS assert (o == generation_allocation_start (gen)); assert (method_table (o) == g_gc_pFreeObjectMethodTable); o = o + Align (size (o), align_const); #endif //!USE_REGIONS } uint8_t* plug_end = o; current_sweep_pos = o; next_sweep_obj = o; #ifdef DOUBLY_LINKED_FL current_sweep_seg = seg; #endif //DOUBLY_LINKED_FL // This records the total size of free objects (including the ones on and not on FL) // in the gap and it gets set to 0 when we encounter a plug. If the last gap we saw // on a seg is unmarked, we will process this in process_background_segment_end. size_t free_obj_size_last_gap = 0; allow_fgc(); uint8_t* end = background_next_end (seg, (i > max_generation)); dprintf (3333, ("bgs: seg: %Ix, [%Ix, %Ix[%Ix", (size_t)seg, (size_t)heap_segment_mem (seg), (size_t)heap_segment_allocated (seg), (size_t)heap_segment_background_allocated (seg))); while (o < end) { if (background_object_marked (o, TRUE)) { uint8_t* plug_start = o; if (i > max_generation) { dprintf (2, ("uoh fr: [%Ix-%Ix[(%Id)", plug_end, plug_start, plug_start-plug_end)); } thread_gap (plug_end, plug_start-plug_end, gen); if (i == max_generation) { add_gen_free (max_generation, plug_start-plug_end); #ifdef DOUBLY_LINKED_FL if (free_obj_size_last_gap) { generation_free_obj_space (gen) -= free_obj_size_last_gap; dprintf (3333, ("[h%d] LG: gen2FO-: %Id->%Id", heap_number, free_obj_size_last_gap, generation_free_obj_space (gen))); free_obj_size_last_gap = 0; } #endif //DOUBLY_LINKED_FL fix_brick_to_highest (plug_end, plug_start); // we need to fix the brick for the next plug here 'cause an FGC can // happen and can't read a stale brick. 
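// (The call below also covers the brick that contains plug_start itself.)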
fix_brick_to_highest (plug_start, plug_start); } do { next_sweep_obj = o + Align (size (o), align_const); current_num_objs++; if (current_num_objs >= num_objs) { current_sweep_pos = next_sweep_obj; allow_fgc(); current_num_objs = 0; } o = next_sweep_obj; } while ((o < end) && background_object_marked(o, TRUE)); plug_end = o; if (i == max_generation) { add_gen_plug (max_generation, plug_end-plug_start); dd_survived_size (dd) += (plug_end - plug_start); } dprintf (3, ("bgs: plug [%Ix, %Ix[", (size_t)plug_start, (size_t)plug_end)); } while ((o < end) && !background_object_marked (o, FALSE)) { size_t size_o = Align(size (o), align_const); next_sweep_obj = o + size_o; #ifdef DOUBLY_LINKED_FL if (gen != large_object_generation) { if (method_table (o) == g_gc_pFreeObjectMethodTable) { free_obj_size_last_gap += size_o; if (is_on_free_list (o, size_o)) { generation_allocator (gen)->unlink_item_no_undo (o, size_o); generation_free_list_space (gen) -= size_o; assert ((ptrdiff_t)generation_free_list_space (gen) >= 0); generation_free_obj_space (gen) += size_o; dprintf (3333, ("[h%d] gen2F-: %Ix->%Ix(%Id) FL: %Id", heap_number, o, (o + size_o), size_o, generation_free_list_space (gen))); dprintf (3333, ("h%d: gen2FO+: %Ix(%Id)->%Id (g: %Id)", heap_number, o, size_o, generation_free_obj_space (gen), free_obj_size_last_gap)); remove_gen_free (max_generation, size_o); } else { // this was not on the free list so it was already part of // free_obj_space, so no need to subtract from it. However, // we do need to keep track of it in this gap's FO space. dprintf (3333, ("h%d: gen2FO: %Ix(%Id)->%Id (g: %Id)", heap_number, o, size_o, generation_free_obj_space (gen), free_obj_size_last_gap)); } dprintf (3333, ("h%d: total FO: %Ix->%Ix FL: %Id, FO: %Id (g: %Id)", heap_number, plug_end, next_sweep_obj, generation_free_list_space (gen), generation_free_obj_space (gen), free_obj_size_last_gap)); } } #endif //DOUBLY_LINKED_FL current_num_objs++; if (current_num_objs >= num_objs) { current_sweep_pos = plug_end; dprintf (1234, ("f: swept till %Ix", current_sweep_pos)); allow_fgc(); current_num_objs = 0; } o = next_sweep_obj; } } #ifdef DOUBLY_LINKED_FL next_seg = heap_segment_next (seg); #else //DOUBLY_LINKED_FL if (i > max_generation) { next_seg = heap_segment_next (seg); } else { // For SOH segments we go backwards. next_seg = heap_segment_prev (gen_start_seg, seg); } #endif //DOUBLY_LINKED_FL BOOL delete_p = FALSE; if (!heap_segment_read_only_p (seg)) { if (i > max_generation) { // we can treat all UOH segments as in the bgc domain // regardless of whether we saw in bgc mark or not // because we don't allow UOH allocations during bgc // sweep anyway - the UOH segments can't change. 
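// free_obj_size_last_gap is only tracked for SOH (under DOUBLY_LINKED_FL), so 0 is passed here for UOH segments.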
process_background_segment_end (seg, gen, plug_end, start_seg, &delete_p, 0); } else { assert (heap_segment_background_allocated (seg) != 0); process_background_segment_end (seg, gen, plug_end, start_seg, &delete_p, free_obj_size_last_gap); #ifndef USE_REGIONS assert (next_seg || !delete_p); #endif //!USE_REGIONS } } heap_segment* saved_prev_seg = prev_seg; if (delete_p) { generation_delete_heap_segment (gen, seg, prev_seg, next_seg); } else { prev_seg = seg; dprintf (2, ("seg %Ix (%Ix) has been swept", seg, heap_segment_mem (seg))); seg->flags |= heap_segment_flags_swept; current_sweep_pos = end; } verify_soh_segment_list(); #ifdef DOUBLY_LINKED_FL while (next_seg && heap_segment_background_allocated (next_seg) == 0) { dprintf (2, ("[h%d] skip new %Ix ", heap_number, next_seg)); next_seg = heap_segment_next (next_seg); } #endif //DOUBLY_LINKED_FL dprintf (GTC_LOG, ("seg: %Ix(%Ix), next_seg: %Ix(%Ix), prev_seg: %Ix(%Ix), delete_p %d", seg, (seg ? heap_segment_mem (seg) : 0), next_seg, (next_seg ? heap_segment_mem (next_seg) : 0), saved_prev_seg, (saved_prev_seg ? heap_segment_mem (saved_prev_seg) : 0), (delete_p ? 1 : 0))); seg = next_seg; } generation_allocation_segment (gen) = heap_segment_rw (generation_start_segment (gen)); PREFIX_ASSUME(generation_allocation_segment(gen) != NULL); if (i == max_generation) { dprintf (2, ("bgs: sweeping uoh objects")); concurrent_print_time_delta ("Swe SOH"); FIRE_EVENT(BGC1stSweepEnd, 0); enter_spin_lock (&more_space_lock_uoh); add_saved_spinlock_info (true, me_acquire, mt_bgc_uoh_sweep); concurrent_print_time_delta ("Swe UOH took msl"); // We wait till all allocating threads are completely done. int spin_count = yp_spin_count_unit; while (uoh_alloc_thread_count) { spin_and_switch (spin_count, (uoh_alloc_thread_count == 0)); } current_bgc_state = bgc_sweep_uoh; } } size_t total_soh_size = generation_sizes (generation_of (max_generation)); size_t total_loh_size = generation_size (loh_generation); size_t total_poh_size = generation_size (poh_generation); dprintf (GTC_LOG, ("h%d: S: poh: %Id, loh: %Id, soh: %Id", heap_number, total_poh_size, total_loh_size, total_soh_size)); dprintf (GTC_LOG, ("end of bgc sweep: gen2 FL: %Id, FO: %Id", generation_free_list_space (generation_of (max_generation)), generation_free_obj_space (generation_of (max_generation)))); dprintf (GTC_LOG, ("h%d: end of bgc sweep: loh FL: %Id, FO: %Id", heap_number, generation_free_list_space (generation_of (loh_generation)), generation_free_obj_space (generation_of (loh_generation)))); dprintf (GTC_LOG, ("h%d: end of bgc sweep: poh FL: %Id, FO: %Id", heap_number, generation_free_list_space (generation_of (poh_generation)), generation_free_obj_space (generation_of (poh_generation)))); FIRE_EVENT(BGC2ndConEnd); concurrent_print_time_delta ("background sweep"); heap_segment* reset_seg = heap_segment_rw (generation_start_segment (generation_of (max_generation))); PREFIX_ASSUME(reset_seg != NULL); while (reset_seg) { heap_segment_saved_bg_allocated (reset_seg) = heap_segment_background_allocated (reset_seg); heap_segment_background_allocated (reset_seg) = 0; reset_seg = heap_segment_next_rw (reset_seg); } // We calculate dynamic data here because if we wait till we signal the lh event, // the allocation thread can change the fragmentation and we may read an intermediate // value (which can be greater than the generation size). Plus by that time it won't // be accurate. 
compute_new_dynamic_data (max_generation); #ifdef DOUBLY_LINKED_FL current_bgc_state = bgc_not_in_process; // We can have an FGC triggered before we set the global state to free // so we need to not have left over current_sweep_seg that point to // a segment that might've been deleted at the beginning of an FGC. current_sweep_seg = 0; #endif //DOUBLY_LINKED_FL enable_preemptive (); #ifdef MULTIPLE_HEAPS bgc_t_join.join(this, gc_join_set_state_free); if (bgc_t_join.joined()) #endif //MULTIPLE_HEAPS { // TODO: We are using this join just to set the state. Should // look into eliminating it - check to make sure things that use // this state can live with per heap state like should_check_bgc_mark. current_c_gc_state = c_gc_state_free; #ifdef BGC_SERVO_TUNING if (bgc_tuning::enable_fl_tuning) { enter_spin_lock (&gc_lock); bgc_tuning::record_and_adjust_bgc_end(); leave_spin_lock (&gc_lock); } #endif //BGC_SERVO_TUNING #ifdef MULTIPLE_HEAPS dprintf(2, ("Starting BGC threads after background sweep phase")); bgc_t_join.restart(); #endif //MULTIPLE_HEAPS } disable_preemptive (true); add_saved_spinlock_info (true, me_release, mt_bgc_uoh_sweep); leave_spin_lock (&more_space_lock_uoh); //dprintf (GTC_LOG, ("---- (GC%d)End Background Sweep Phase ----", VolatileLoad(&settings.gc_index))); dprintf (GTC_LOG, ("---- (GC%d)ESw ----", VolatileLoad(&settings.gc_index))); } #endif //BACKGROUND_GC void gc_heap::sweep_uoh_objects (int gen_num) { //this min value is for the sake of the dynamic tuning. //so we know that we are not starting even if we have no //survivors. generation* gen = generation_of (gen_num); heap_segment* start_seg = heap_segment_rw (generation_start_segment (gen)); PREFIX_ASSUME(start_seg != NULL); heap_segment* seg = start_seg; heap_segment* prev_seg = 0; uint8_t* o = get_uoh_start_object (seg, gen); uint8_t* plug_end = o; uint8_t* plug_start = o; generation_allocator (gen)->clear(); generation_free_list_space (gen) = 0; generation_free_obj_space (gen) = 0; generation_free_list_allocated (gen) = 0; dprintf (3, ("sweeping uoh objects")); dprintf (3, ("seg: %Ix, [%Ix, %Ix[, starting from %Ix", (size_t)seg, (size_t)heap_segment_mem (seg), (size_t)heap_segment_allocated (seg), o)); while (1) { if (o >= heap_segment_allocated (seg)) { heap_segment* next_seg = heap_segment_next (seg); //delete the empty segment if not the only one // REGIONS TODO: for regions we can get rid of the start_seg. Just need // to update start region accordingly. 
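// A segment is considered empty here when no marked object was found on it, i.e. plug_end never advanced past the start of the segment.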
if ((plug_end == heap_segment_mem (seg)) && (seg != start_seg) && !heap_segment_read_only_p (seg)) { //prepare for deletion dprintf (3, ("Preparing empty large segment %Ix", (size_t)seg)); assert (prev_seg); heap_segment_next (prev_seg) = next_seg; heap_segment_next (seg) = freeable_uoh_segment; freeable_uoh_segment = seg; #ifdef USE_REGIONS update_start_tail_regions (gen, seg, prev_seg, next_seg); #endif //USE_REGIONS } else { if (!heap_segment_read_only_p (seg)) { dprintf (3, ("Trimming seg to %Ix[", (size_t)plug_end)); heap_segment_allocated (seg) = plug_end; decommit_heap_segment_pages (seg, 0); } prev_seg = seg; } seg = next_seg; if (seg == 0) break; else { o = heap_segment_mem (seg); plug_end = o; dprintf (3, ("seg: %Ix, [%Ix, %Ix[", (size_t)seg, (size_t)heap_segment_mem (seg), (size_t)heap_segment_allocated (seg))); #ifdef USE_REGIONS continue; #endif //USE_REGIONS } } if (uoh_object_marked(o, TRUE)) { plug_start = o; //everything between plug_end and plug_start is free thread_gap (plug_end, plug_start-plug_end, gen); BOOL m = TRUE; while (m) { o = o + AlignQword (size (o)); if (o >= heap_segment_allocated (seg)) { break; } m = uoh_object_marked (o, TRUE); } plug_end = o; dprintf (3, ("plug [%Ix, %Ix[", (size_t)plug_start, (size_t)plug_end)); } else { while (o < heap_segment_allocated (seg) && !uoh_object_marked(o, FALSE)) { o = o + AlignQword (size (o)); } } } generation_allocation_segment (gen) = heap_segment_rw (generation_start_segment (gen)); PREFIX_ASSUME(generation_allocation_segment(gen) != NULL); } void gc_heap::relocate_in_uoh_objects (int gen_num) { generation* gen = generation_of (gen_num); heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); PREFIX_ASSUME(seg != NULL); uint8_t* o = get_uoh_start_object (seg, gen); while (1) { if (o >= heap_segment_allocated (seg)) { seg = heap_segment_next_rw (seg); if (seg == 0) break; else { o = heap_segment_mem (seg); } } while (o < heap_segment_allocated (seg)) { check_class_object_demotion (o); if (contain_pointers (o)) { dprintf(3, ("Relocating through uoh object %Ix", (size_t)o)); go_through_object_nostart (method_table (o), o, size(o), pval, { reloc_survivor_helper (pval); }); } o = o + AlignQword (size (o)); } } } void gc_heap::mark_through_cards_for_uoh_objects (card_fn fn, int gen_num, BOOL relocating CARD_MARKING_STEALING_ARG(gc_heap* hpt)) { #ifdef USE_REGIONS uint8_t* low = 0; #else uint8_t* low = gc_low; #endif //USE_REGIONS size_t end_card = 0; generation* oldest_gen = generation_of (gen_num); heap_segment* seg = heap_segment_rw (generation_start_segment (oldest_gen)); PREFIX_ASSUME(seg != NULL); uint8_t* beg = get_uoh_start_object (seg, oldest_gen); uint8_t* end = heap_segment_allocated (seg); size_t cg_pointers_found = 0; size_t card_word_end = (card_of (align_on_card_word (end)) / card_word_width); size_t n_eph = 0; size_t n_gen = 0; size_t n_card_set = 0; #ifdef USE_REGIONS uint8_t* next_boundary = 0; uint8_t* nhigh = 0; #else uint8_t* next_boundary = (relocating ? generation_plan_allocation_start (generation_of (max_generation -1)) : ephemeral_low); uint8_t* nhigh = (relocating ? 
heap_segment_plan_allocated (ephemeral_heap_segment) : ephemeral_high); #endif //USE_REGIONS BOOL foundp = FALSE; uint8_t* start_address = 0; uint8_t* limit = 0; size_t card = card_of (beg); uint8_t* o = beg; #ifdef BACKGROUND_GC BOOL consider_bgc_mark_p = FALSE; BOOL check_current_sweep_p = FALSE; BOOL check_saved_sweep_p = FALSE; should_check_bgc_mark (seg, &consider_bgc_mark_p, &check_current_sweep_p, &check_saved_sweep_p); #endif //BACKGROUND_GC size_t total_cards_cleared = 0; #ifdef FEATURE_CARD_MARKING_STEALING VOLATILE(uint32_t)* chunk_index = (VOLATILE(uint32_t)*) &(gen_num == loh_generation ? card_mark_chunk_index_loh : card_mark_chunk_index_poh); card_marking_enumerator card_mark_enumerator(seg, low, chunk_index); card_word_end = 0; #endif // FEATURE_CARD_MARKING_STEALING #ifdef USE_REGIONS int condemned_gen = settings.condemned_generation; #else int condemned_gen = -1; #endif //USE_REGIONS //dprintf(3,( "scanning large objects from %Ix to %Ix", (size_t)beg, (size_t)end)); dprintf(3, ("CMl: %Ix->%Ix", (size_t)beg, (size_t)end)); while (1) { if ((o < end) && (card_of(o) > card)) { dprintf (3, ("Found %Id cg pointers", cg_pointers_found)); if (cg_pointers_found == 0) { uint8_t* last_object_processed = o; #ifdef FEATURE_CARD_MARKING_STEALING last_object_processed = min(limit, o); #endif // FEATURE_CARD_MARKING_STEALING dprintf (3, (" Clearing cards [%Ix, %Ix[ ", (size_t)card_address(card), (size_t)last_object_processed)); clear_cards (card, card_of((uint8_t*)last_object_processed)); total_cards_cleared += (card_of((uint8_t*)last_object_processed) - card); } n_eph +=cg_pointers_found; cg_pointers_found = 0; card = card_of ((uint8_t*)o); } if ((o < end) &&(card >= end_card)) { #ifdef FEATURE_CARD_MARKING_STEALING // find another chunk with some cards set foundp = find_next_chunk(card_mark_enumerator, seg, n_card_set, start_address, limit, card, end_card, card_word_end); #else // FEATURE_CARD_MARKING_STEALING foundp = find_card (card_table, card, card_word_end, end_card); if (foundp) { n_card_set+= end_card - card; start_address = max (beg, card_address (card)); } limit = min (end, card_address (end_card)); #endif // FEATURE_CARD_MARKING_STEALING } if ((!foundp) || (o >= end) || (card_address (card) >= end)) { if ((foundp) && (cg_pointers_found == 0)) { dprintf(3,(" Clearing cards [%Ix, %Ix[ ", (size_t)card_address(card), (size_t)card_address(card+1))); clear_cards (card, card+1); total_cards_cleared += 1; } n_eph +=cg_pointers_found; cg_pointers_found = 0; #ifdef FEATURE_CARD_MARKING_STEALING // we have decided to move to the next segment - make sure we exhaust the chunk enumerator for this segment card_mark_enumerator.exhaust_segment(seg); #endif // FEATURE_CARD_MARKING_STEALING if ((seg = heap_segment_next_rw (seg)) != 0) { #ifdef BACKGROUND_GC should_check_bgc_mark (seg, &consider_bgc_mark_p, &check_current_sweep_p, &check_saved_sweep_p); #endif //BACKGROUND_GC beg = heap_segment_mem (seg); end = compute_next_end (seg, low); #ifdef FEATURE_CARD_MARKING_STEALING card_word_end = 0; #else // FEATURE_CARD_MARKING_STEALING card_word_end = card_of (align_on_card_word (end)) / card_word_width; #endif // FEATURE_CARD_MARKING_STEALING card = card_of (beg); o = beg; end_card = 0; continue; } else { break; } } assert (card_set_p (card)); { dprintf(3,("card %Ix: o: %Ix, l: %Ix[ ", card, (size_t)o, (size_t)limit)); assert (Align (size (o)) >= Align (min_obj_size)); size_t s = size (o); uint8_t* next_o = o + AlignQword (s); Prefetch (next_o); while (o < limit) { s = size (o); assert (Align (s) 
>= Align (min_obj_size)); next_o = o + AlignQword (s); Prefetch (next_o); dprintf (4, ("|%Ix|", (size_t)o)); if (next_o < start_address) { goto end_object; } #ifdef BACKGROUND_GC if (!fgc_should_consider_object (o, seg, consider_bgc_mark_p, check_current_sweep_p, check_saved_sweep_p)) { goto end_object; } #endif //BACKGROUND_GC #ifdef COLLECTIBLE_CLASS if (is_collectible(o)) { BOOL passed_end_card_p = FALSE; if (card_of (o) > card) { passed_end_card_p = card_transition (o, end, card_word_end, cg_pointers_found, n_eph, n_card_set, card, end_card, foundp, start_address, limit, total_cards_cleared CARD_MARKING_STEALING_ARGS(card_mark_enumerator, seg, card_word_end)); } if ((!passed_end_card_p || foundp) && (card_of (o) == card)) { // card is valid and it covers the head of the object if (fn == &gc_heap::relocate_address) { cg_pointers_found++; } else { uint8_t* class_obj = get_class_object (o); mark_through_cards_helper (&class_obj, n_gen, cg_pointers_found, fn, nhigh, next_boundary, condemned_gen, max_generation CARD_MARKING_STEALING_ARG(hpt)); } } if (passed_end_card_p) { if (foundp && (card_address (card) < next_o)) { goto go_through_refs; } else { goto end_object; } } } go_through_refs: #endif //COLLECTIBLE_CLASS if (contain_pointers (o)) { dprintf(3,("Going through %Ix", (size_t)o)); go_through_object (method_table(o), o, s, poo, start_address, use_start, (o + s), { if (card_of ((uint8_t*)poo) > card) { BOOL passed_end_card_p = card_transition ((uint8_t*)poo, end, card_word_end, cg_pointers_found, n_eph, n_card_set, card, end_card, foundp, start_address, limit, total_cards_cleared CARD_MARKING_STEALING_ARGS(card_mark_enumerator, seg, card_word_end)); if (passed_end_card_p) { if (foundp && (card_address (card) < next_o)) { //new_start(); { if (ppstop <= (uint8_t**)start_address) {break;} else if (poo < (uint8_t**)start_address) {poo = (uint8_t**)start_address;} } } else { goto end_object; } } } mark_through_cards_helper (poo, n_gen, cg_pointers_found, fn, nhigh, next_boundary, condemned_gen, max_generation CARD_MARKING_STEALING_ARG(hpt)); } ); } end_object: o = next_o; } } } // compute the efficiency ratio of the card table if (!relocating) { #ifdef FEATURE_CARD_MARKING_STEALING Interlocked::ExchangeAddPtr(&n_eph_loh, n_eph); Interlocked::ExchangeAddPtr(&n_gen_loh, n_gen); dprintf (3, ("h%d marking h%d Mloh: cross: %Id, useful: %Id, cards set: %Id, cards cleared: %Id, ratio: %d", hpt->heap_number, heap_number, n_eph, n_gen, n_card_set, total_cards_cleared, (n_eph ? (int)(((float)n_gen / (float)n_eph) * 100) : 0))); dprintf (3, ("h%d marking h%d Mloh: total cross %Id, useful: %Id, running ratio: %d", hpt->heap_number, heap_number, (size_t)n_eph_loh, (size_t)n_gen_loh, (n_eph_loh ? (int)(((float)n_gen_loh / (float)n_eph_loh) * 100) : 0))); #else generation_skip_ratio = min (((n_eph > MIN_LOH_CROSS_GEN_REFS) ? 
(int)(((float)n_gen / (float)n_eph) * 100) : 100), generation_skip_ratio); dprintf (3, ("marking h%d Mloh: cross: %Id, useful: %Id, cards cleared: %Id, cards set: %Id, ratio: %d", heap_number, n_eph, n_gen, total_cards_cleared, n_card_set, generation_skip_ratio)); #endif //FEATURE_CARD_MARKING_STEALING } else { dprintf (3, ("R: Mloh: cross: %Id, useful: %Id, cards set: %Id, ratio: %d", n_eph, n_gen, n_card_set, generation_skip_ratio)); } } void gc_heap::descr_generations_to_profiler (gen_walk_fn fn, void *context) { #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) { gc_heap* hp = g_heaps[i]; #else //MULTIPLE_HEAPS { gc_heap* hp = NULL; #ifdef _PREFAST_ // prefix complains about us dereferencing hp in wks build even though we only access static members // this way. not sure how to shut it up except for this ugly workaround: PREFIX_ASSUME(hp != NULL); #endif // _PREFAST_ #endif //MULTIPLE_HEAPS for (int curr_gen_number = total_generation_count-1; curr_gen_number >= 0; curr_gen_number--) { generation* gen = hp->generation_of (curr_gen_number); heap_segment* seg = generation_start_segment (gen); #ifdef USE_REGIONS while (seg) { fn(context, curr_gen_number, heap_segment_mem (seg), heap_segment_allocated (seg), heap_segment_reserved (seg)); seg = heap_segment_next (seg); } #else while (seg && (seg != hp->ephemeral_heap_segment)) { assert (curr_gen_number > 0); // report bounds from heap_segment_mem (seg) to // heap_segment_allocated (seg); // for generation # curr_gen_number // for heap # heap_no fn(context, curr_gen_number, heap_segment_mem (seg), heap_segment_allocated (seg), (curr_gen_number > max_generation) ? heap_segment_reserved (seg) : heap_segment_allocated (seg)); seg = heap_segment_next (seg); } if (seg) { assert (seg == hp->ephemeral_heap_segment); assert (curr_gen_number <= max_generation); if (curr_gen_number == max_generation) { if (heap_segment_mem (seg) < generation_allocation_start (hp->generation_of (max_generation-1))) { // report bounds from heap_segment_mem (seg) to // generation_allocation_start (generation_of (max_generation-1)) // for heap # heap_number fn(context, curr_gen_number, heap_segment_mem (seg), generation_allocation_start (hp->generation_of (max_generation-1)), generation_allocation_start (hp->generation_of (max_generation-1)) ); } } else if (curr_gen_number != 0) { //report bounds from generation_allocation_start (generation_of (curr_gen_number)) // to generation_allocation_start (generation_of (curr_gen_number-1)) // for heap # heap_number fn(context, curr_gen_number, generation_allocation_start (hp->generation_of (curr_gen_number)), generation_allocation_start (hp->generation_of (curr_gen_number-1)), generation_allocation_start (hp->generation_of (curr_gen_number-1))); } else { //report bounds from generation_allocation_start (generation_of (curr_gen_number)) // to heap_segment_allocated (ephemeral_heap_segment); // for heap # heap_number fn(context, curr_gen_number, generation_allocation_start (hp->generation_of (curr_gen_number)), heap_segment_allocated (hp->ephemeral_heap_segment), heap_segment_reserved (hp->ephemeral_heap_segment) ); } } #endif //USE_REGIONS } } } #ifdef TRACE_GC // Note that when logging is on it can take a long time to go through the free items. 
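// The body of print_free_list below is currently commented out; it can be re-enabled locally when a per-segment free list dump is needed.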
void gc_heap::print_free_list (int gen, heap_segment* seg) { UNREFERENCED_PARAMETER(gen); UNREFERENCED_PARAMETER(seg); /* if (settings.concurrent == FALSE) { uint8_t* seg_start = heap_segment_mem (seg); uint8_t* seg_end = heap_segment_allocated (seg); dprintf (3, ("Free list in seg %Ix:", seg_start)); size_t total_free_item = 0; allocator* gen_allocator = generation_allocator (generation_of (gen)); for (unsigned int b = 0; b < gen_allocator->number_of_buckets(); b++) { uint8_t* fo = gen_allocator->alloc_list_head_of (b); while (fo) { if (fo >= seg_start && fo < seg_end) { total_free_item++; size_t free_item_len = size(fo); dprintf (3, ("[%Ix, %Ix[:%Id", (size_t)fo, (size_t)(fo + free_item_len), free_item_len)); } fo = free_list_slot (fo); } } dprintf (3, ("total %Id free items", total_free_item)); } */ } #endif //TRACE_GC void gc_heap::descr_generations (const char* msg) { #ifndef TRACE_GC UNREFERENCED_PARAMETER(msg); #endif //!TRACE_GC #ifdef STRESS_LOG if (StressLog::StressLogOn(LF_GC, LL_INFO10)) { gc_heap* hp = 0; #ifdef MULTIPLE_HEAPS hp= this; #endif //MULTIPLE_HEAPS STRESS_LOG1(LF_GC, LL_INFO10, "GC Heap %p\n", hp); for (int n = max_generation; n >= 0; --n) { #ifndef USE_REGIONS STRESS_LOG4(LF_GC, LL_INFO10, " Generation %d [%p, %p] cur = %p\n", n, generation_allocation_start(generation_of(n)), generation_allocation_limit(generation_of(n)), generation_allocation_pointer(generation_of(n))); #endif //USE_REGIONS heap_segment* seg = generation_start_segment(generation_of(n)); while (seg) { STRESS_LOG4(LF_GC, LL_INFO10, " Segment mem %p alloc = %p used %p committed %p\n", heap_segment_mem(seg), heap_segment_allocated(seg), heap_segment_used(seg), heap_segment_committed(seg)); seg = heap_segment_next(seg); } } } #endif // STRESS_LOG #ifdef TRACE_GC dprintf (2, ("lowest_address: %Ix highest_address: %Ix", (size_t) lowest_address, (size_t) highest_address)); #ifdef BACKGROUND_GC dprintf (2, ("bgc lowest_address: %Ix bgc highest_address: %Ix", (size_t) background_saved_lowest_address, (size_t) background_saved_highest_address)); #endif //BACKGROUND_GC if (heap_number == 0) { dprintf (1, ("total heap size: %Id, commit size: %Id", get_total_heap_size(), get_total_committed_size())); } for (int curr_gen_number = total_generation_count - 1; curr_gen_number >= 0; curr_gen_number--) { size_t total_gen_size = generation_size (curr_gen_number); #ifdef SIMPLE_DPRINTF dprintf (GTC_LOG, ("[%s][g%d]gen %d:, size: %Id, frag: %Id(L: %Id, O: %Id), f: %d%% %s %s %s", msg, settings.condemned_generation, curr_gen_number, total_gen_size, dd_fragmentation (dynamic_data_of (curr_gen_number)), generation_free_list_space (generation_of (curr_gen_number)), generation_free_obj_space (generation_of (curr_gen_number)), (total_gen_size ? (int)(((double)dd_fragmentation (dynamic_data_of (curr_gen_number)) / (double)total_gen_size) * 100) : 0), (settings.compaction ? "(compact)" : "(sweep)"), (settings.heap_expansion ? "(EX)" : " "), (settings.promotion ? 
"Promotion" : "NoPromotion"))); #else dprintf (2, ( "Generation %d: generation size: %Id, fragmentation: %Id", curr_gen_number, total_gen_size, dd_fragmentation (dynamic_data_of (curr_gen_number)))); #endif //SIMPLE_DPRINTF generation* gen = generation_of (curr_gen_number); heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); #ifdef USE_REGIONS dprintf (1, ("g%d: start seg: %Ix alloc seg: %Ix, tail region: %Ix", curr_gen_number, heap_segment_mem (seg), heap_segment_mem (generation_allocation_segment (gen)), heap_segment_mem (generation_tail_region (gen)))); while (seg) { dprintf (GTC_LOG, ("g%d: (%d:p %d) [%Ix %Ix(sa: %Ix, pa: %Ix)[-%Ix[ (%Id) (%Id)", curr_gen_number, heap_segment_gen_num (seg), heap_segment_plan_gen_num (seg), (size_t)heap_segment_mem (seg), (size_t)heap_segment_allocated (seg), (size_t)heap_segment_saved_allocated (seg), (size_t)heap_segment_plan_allocated (seg), (size_t)heap_segment_committed (seg), (size_t)(heap_segment_allocated (seg) - heap_segment_mem (seg)), (size_t)(heap_segment_committed (seg) - heap_segment_allocated (seg)))); print_free_list (curr_gen_number, seg); seg = heap_segment_next (seg); } #else while (seg && (seg != ephemeral_heap_segment)) { dprintf (GTC_LOG, ("g%d: [%Ix %Ix[-%Ix[ (%Id) (%Id)", curr_gen_number, (size_t)heap_segment_mem (seg), (size_t)heap_segment_allocated (seg), (size_t)heap_segment_committed (seg), (size_t)(heap_segment_allocated (seg) - heap_segment_mem (seg)), (size_t)(heap_segment_committed (seg) - heap_segment_allocated (seg)))); print_free_list (curr_gen_number, seg); seg = heap_segment_next (seg); } if (seg && (seg != generation_start_segment (gen))) { dprintf (GTC_LOG, ("g%d: [%Ix %Ix[", curr_gen_number, (size_t)heap_segment_mem (seg), (size_t)generation_allocation_start (generation_of (curr_gen_number-1)))); print_free_list (curr_gen_number, seg); } else if (seg) { dprintf (GTC_LOG, ("g%d: [%Ix %Ix[", curr_gen_number, (size_t)generation_allocation_start (generation_of (curr_gen_number)), (size_t)(((curr_gen_number == 0)) ? (heap_segment_allocated (generation_start_segment (generation_of (curr_gen_number)))) : (generation_allocation_start (generation_of (curr_gen_number - 1)))) )); print_free_list (curr_gen_number, seg); } #endif //USE_REGIONS } #endif //TRACE_GC } //----------------------------------------------------------------------------- // // VM Specific support // //----------------------------------------------------------------------------- //Static member variables. VOLATILE(BOOL) GCHeap::GcInProgress = FALSE; GCEvent *GCHeap::WaitForGCEvent = NULL; unsigned GCHeap::GcCondemnedGeneration = 0; size_t GCHeap::totalSurvivedSize = 0; #ifdef FEATURE_PREMORTEM_FINALIZATION CFinalize* GCHeap::m_Finalize = 0; BOOL GCHeap::GcCollectClasses = FALSE; VOLATILE(int32_t) GCHeap::m_GCFLock = 0; #ifndef FEATURE_REDHAWK // Redhawk forces relocation a different way #ifdef STRESS_HEAP #ifndef MULTIPLE_HEAPS OBJECTHANDLE GCHeap::m_StressObjs[NUM_HEAP_STRESS_OBJS]; int GCHeap::m_CurStressObj = 0; #endif // !MULTIPLE_HEAPS #endif // STRESS_HEAP #endif // FEATURE_REDHAWK #endif //FEATURE_PREMORTEM_FINALIZATION class NoGCRegionLockHolder { public: NoGCRegionLockHolder() { enter_spin_lock_noinstru(&g_no_gc_lock); } ~NoGCRegionLockHolder() { leave_spin_lock_noinstru(&g_no_gc_lock); } }; // An explanation of locking for finalization: // // Multiple threads allocate objects. During the allocation, they are serialized by // the AllocLock above. But they release that lock before they register the object // for finalization. 
That's because there is much contention for the alloc lock, but // finalization is presumed to be a rare case. // // So registering an object for finalization must be protected by the FinalizeLock. // // There is another logical queue that involves finalization. When objects registered // for finalization become unreachable, they are moved from the "registered" queue to // the "unreachable" queue. Note that this only happens inside a GC, so no other // threads can be manipulating either queue at that time. Once the GC is over and // threads are resumed, the Finalizer thread will dequeue objects from the "unreachable" // queue and call their finalizers. This dequeue operation is also protected with // the finalize lock. // // At first, this seems unnecessary. Only one thread is ever enqueuing or dequeuing // on the unreachable queue (either the GC thread during a GC or the finalizer thread // when a GC is not in progress). The reason we share a lock with threads enqueuing // on the "registered" queue is that the "registered" and "unreachable" queues are // interrelated. // // They are actually two regions of a longer list, which can only grow at one end. // So to enqueue an object to the "registered" list, you actually rotate an unreachable // object at the boundary between the logical queues, out to the other end of the // unreachable queue -- where all growing takes place. Then you move the boundary // pointer so that the gap we created at the boundary is now on the "registered" // side rather than the "unreachable" side. Now the object can be placed into the // "registered" side at that point. This is much more efficient than doing moves // of arbitrarily long regions, but it causes the two queues to require a shared lock. // // Notice that Enter/LeaveFinalizeLock is not a GC-aware spin lock. Instead, it relies // on the fact that the lock will only be taken for a brief period and that it will // never provoke or allow a GC while the lock is held. This is critical. If the // FinalizeLock used enter_spin_lock (and thus sometimes enters preemptive mode to // allow a GC), then the Alloc client would have to GC protect a finalizable object // to protect against that eventuality. That is too slow! BOOL IsValidObject99(uint8_t *pObject) { #ifdef VERIFY_HEAP if (!((CObjectHeader*)pObject)->IsFree()) ((CObjectHeader *) pObject)->Validate(); #endif //VERIFY_HEAP return(TRUE); } #ifdef BACKGROUND_GC BOOL gc_heap::bgc_mark_array_range (heap_segment* seg, BOOL whole_seg_p, uint8_t** range_beg, uint8_t** range_end) { uint8_t* seg_start = heap_segment_mem (seg); uint8_t* seg_end = (whole_seg_p ? 
heap_segment_reserved (seg) : align_on_mark_word (heap_segment_allocated (seg))); if ((seg_start < background_saved_highest_address) && (seg_end > background_saved_lowest_address)) { *range_beg = max (seg_start, background_saved_lowest_address); *range_end = min (seg_end, background_saved_highest_address); return TRUE; } else { return FALSE; } } void gc_heap::bgc_verify_mark_array_cleared (heap_segment* seg) { #ifdef VERIFY_HEAP if (gc_heap::background_running_p() && (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)) { uint8_t* range_beg = 0; uint8_t* range_end = 0; if (bgc_mark_array_range (seg, TRUE, &range_beg, &range_end)) { size_t markw = mark_word_of (range_beg); size_t markw_end = mark_word_of (range_end); while (markw < markw_end) { if (mark_array [markw]) { dprintf (1, ("The mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared", markw, mark_array [markw], mark_word_address (markw))); FATAL_GC_ERROR(); } markw++; } uint8_t* p = mark_word_address (markw_end); while (p < range_end) { assert (!(mark_array_marked (p))); p++; } } } #endif //VERIFY_HEAP } void gc_heap::verify_mark_bits_cleared (uint8_t* obj, size_t s) { #ifdef VERIFY_HEAP size_t start_mark_bit = mark_bit_of (obj) + 1; size_t end_mark_bit = mark_bit_of (obj + s); unsigned int startbit = mark_bit_bit (start_mark_bit); unsigned int endbit = mark_bit_bit (end_mark_bit); size_t startwrd = mark_bit_word (start_mark_bit); size_t endwrd = mark_bit_word (end_mark_bit); unsigned int result = 0; unsigned int firstwrd = ~(lowbits (~0, startbit)); unsigned int lastwrd = ~(highbits (~0, endbit)); if (startwrd == endwrd) { unsigned int wrd = firstwrd & lastwrd; result = mark_array[startwrd] & wrd; if (result) { FATAL_GC_ERROR(); } return; } // verify the first mark word is cleared. if (startbit) { result = mark_array[startwrd] & firstwrd; if (result) { FATAL_GC_ERROR(); } startwrd++; } for (size_t wrdtmp = startwrd; wrdtmp < endwrd; wrdtmp++) { result = mark_array[wrdtmp]; if (result) { FATAL_GC_ERROR(); } } // set the last mark word. 
if (endbit) { result = mark_array[endwrd] & lastwrd; if (result) { FATAL_GC_ERROR(); } } #endif //VERIFY_HEAP } void gc_heap::clear_all_mark_array() { for (int i = get_start_generation_index(); i < total_generation_count; i++) { generation* gen = generation_of (i); heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); while (seg) { uint8_t* range_beg = 0; uint8_t* range_end = 0; if (bgc_mark_array_range (seg, (seg == ephemeral_heap_segment), &range_beg, &range_end)) { size_t markw = mark_word_of (range_beg); size_t markw_end = mark_word_of (range_end); size_t size_total = (markw_end - markw) * sizeof (uint32_t); //num_dwords_written = markw_end - markw; size_t size = 0; size_t size_left = 0; assert (((size_t)&mark_array[markw] & (sizeof(PTR_PTR)-1)) == 0); if ((size_total & (sizeof(PTR_PTR) - 1)) != 0) { size = (size_total & ~(sizeof(PTR_PTR) - 1)); size_left = size_total - size; assert ((size_left & (sizeof (uint32_t) - 1)) == 0); } else { size = size_total; } memclr ((uint8_t*)&mark_array[markw], size); if (size_left != 0) { uint32_t* markw_to_clear = &mark_array[markw + size / sizeof (uint32_t)]; for (size_t i = 0; i < (size_left / sizeof (uint32_t)); i++) { *markw_to_clear = 0; markw_to_clear++; } } } seg = heap_segment_next_rw (seg); } } } void gc_heap::verify_mark_array_cleared() { #ifdef VERIFY_HEAP if (gc_heap::background_running_p() && (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)) { for (int i = get_start_generation_index(); i < total_generation_count; i++) { generation* gen = generation_of (i); heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); while (seg) { bgc_verify_mark_array_cleared (seg); seg = heap_segment_next_rw (seg); } } } #endif //VERIFY_HEAP } #endif //BACKGROUND_GC // This function is called to make sure we don't mess up the segment list // in SOH. It's called by: // 1) begin and end of ephemeral GCs // 2) during bgc sweep when we switch segments. void gc_heap::verify_soh_segment_list() { #ifdef VERIFY_HEAP if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) { for (int i = get_start_generation_index(); i <= max_generation; i++) { generation* gen = generation_of (i); heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); heap_segment* last_seg = 0; while (seg) { last_seg = seg; seg = heap_segment_next_rw (seg); } #ifdef USE_REGIONS if (last_seg != generation_tail_region (gen)) #else if (last_seg != ephemeral_heap_segment) #endif //USE_REGIONS { FATAL_GC_ERROR(); } } } #endif //VERIFY_HEAP } // This function can be called at any foreground GCs or blocking GCs. For background GCs, // it can be called at the end of the final marking; and at any point during background // sweep. // NOTE - to be able to call this function during background sweep, we need to temporarily // NOT clear the mark array bits as we go. #ifdef BACKGROUND_GC void gc_heap::verify_partial() { // Different ways to fail. 
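/* Illustrative sketch (hypothetical helper, not the real mark_array accessors): the mark-array routines above (verify_mark_bits_cleared, clear_all_mark_array) all split a bit range into a partial first 32-bit word, a run of whole words, and a partial last word. A minimal standalone version of that decomposition, clearing instead of verifying:

       #include <stdint.h>
       #include <stddef.h>

       // Clears bits [start_bit, end_bit) in an array of 32-bit words.
       static void clear_bit_range (uint32_t* words, size_t start_bit, size_t end_bit)
       {
           size_t   start_word = start_bit / 32;
           size_t   end_word   = end_bit / 32;
           uint32_t first_mask = ~0u << (start_bit % 32);  // bits at/above start_bit
           uint32_t last_mask  = (end_bit % 32) ? ((1u << (end_bit % 32)) - 1) : 0u; // bits below end_bit

           if (start_word == end_word)
           {
               words[start_word] &= ~(first_mask & last_mask);
               return;
           }
           words[start_word] &= ~first_mask;                // partial first word
           for (size_t w = start_word + 1; w < end_word; w++)
               words[w] = 0;                                // whole words in the middle
           if (end_bit % 32)
               words[end_word] &= ~last_mask;               // partial last word
       }

   The verification code checks that the same masked bits are zero instead of clearing them. */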
BOOL mark_missed_p = FALSE; BOOL bad_ref_p = FALSE; BOOL free_ref_p = FALSE; for (int i = get_start_generation_index(); i < total_generation_count; i++) { generation* gen = generation_of (i); int align_const = get_alignment_constant (i == max_generation); heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); while (seg) { uint8_t* o = heap_segment_mem (seg); uint8_t* end = heap_segment_allocated (seg); while (o < end) { size_t s = size (o); BOOL marked_p = background_object_marked (o, FALSE); if (marked_p) { go_through_object_cl (method_table (o), o, s, oo, { if (*oo) { //dprintf (3, ("VOM: verifying member %Ix in obj %Ix", (size_t)*oo, o)); MethodTable *pMT = method_table (*oo); if (pMT == g_gc_pFreeObjectMethodTable) { free_ref_p = TRUE; FATAL_GC_ERROR(); } if (!pMT->SanityCheck()) { bad_ref_p = TRUE; dprintf (1, ("Bad member of %Ix %Ix", (size_t)oo, (size_t)*oo)); FATAL_GC_ERROR(); } if (current_bgc_state == bgc_final_marking) { if (marked_p && !background_object_marked (*oo, FALSE)) { mark_missed_p = TRUE; FATAL_GC_ERROR(); } } } } ); } o = o + Align(s, align_const); } seg = heap_segment_next_rw (seg); } } } #endif //BACKGROUND_GC #ifdef VERIFY_HEAP void gc_heap::verify_free_lists () { for (int gen_num = 0; gen_num < total_generation_count; gen_num++) { dprintf (3, ("Verifying free list for gen:%d", gen_num)); allocator* gen_alloc = generation_allocator (generation_of (gen_num)); size_t sz = gen_alloc->first_bucket_size(); bool verify_undo_slot = (gen_num != 0) && (gen_num <= max_generation) && !gen_alloc->discard_if_no_fit_p(); for (unsigned int a_l_number = 0; a_l_number < gen_alloc->number_of_buckets(); a_l_number++) { uint8_t* free_list = gen_alloc->alloc_list_head_of (a_l_number); uint8_t* prev = 0; while (free_list) { if (!((CObjectHeader*)free_list)->IsFree()) { dprintf (1, ("Verifying Heap: curr free list item %Ix isn't a free object)", (size_t)free_list)); FATAL_GC_ERROR(); } if (((a_l_number < (gen_alloc->number_of_buckets()-1))&& (unused_array_size (free_list) >= sz)) || ((a_l_number != 0) && (unused_array_size (free_list) < sz/2))) { dprintf (1, ("Verifying Heap: curr free list item %Ix isn't in the right bucket", (size_t)free_list)); FATAL_GC_ERROR(); } if (verify_undo_slot && (free_list_undo (free_list) != UNDO_EMPTY)) { dprintf (1, ("Verifying Heap: curr free list item %Ix has non empty undo slot", (size_t)free_list)); FATAL_GC_ERROR(); } if ((gen_num <= max_generation) && (object_gennum (free_list)!= gen_num)) { dprintf (1, ("Verifying Heap: curr free list item %Ix is in the wrong generation free list", (size_t)free_list)); FATAL_GC_ERROR(); } #ifdef DOUBLY_LINKED_FL uint8_t* prev_free_item = free_list_prev (free_list); if (gen_num == max_generation) { if (prev_free_item != prev) { dprintf (1, ("%Ix prev should be: %Ix, actual: %Ix", free_list, prev_free_item, prev)); FATAL_GC_ERROR(); } } #endif //DOUBLY_LINKED_FL prev = free_list; free_list = free_list_slot (free_list); } //verify the sanity of the tail uint8_t* tail = gen_alloc->alloc_list_tail_of (a_l_number); if (!((tail == 0) || (tail == prev))) { dprintf (1, ("Verifying Heap: tail of free list is not correct, tail %Ix, prev %Ix", tail, prev)); FATAL_GC_ERROR(); } if (tail == 0) { uint8_t* head = gen_alloc->alloc_list_head_of (a_l_number); if ((head != 0) && (free_list_slot (head) != 0)) { dprintf (1, ("Verifying Heap: head of free list is not correct, head %Ix -> %Ix", head, free_list_slot (head))); FATAL_GC_ERROR(); } } sz *=2; } } } void gc_heap::verify_regions (int gen_number, bool 
can_verify_gen_num, bool can_verify_tail) { #ifdef USE_REGIONS // For the given generation, verify that // // 1) it has at least one region. // 2) the tail region is the same as the last region if we following the list of regions // in that generation. // 3) no region is pointing to itself. // 4) if we can verify gen num, each region's gen_num and plan_gen_num are the same and // they are the right generation. generation* gen = generation_of (gen_number); int num_regions_in_gen = 0; heap_segment* seg_in_gen = heap_segment_rw (generation_start_segment (gen)); heap_segment* prev_region_in_gen = 0; heap_segment* tail_region = generation_tail_region (gen); while (seg_in_gen) { if (can_verify_gen_num) { if (heap_segment_gen_num (seg_in_gen) != min (gen_number, max_generation)) { dprintf (REGIONS_LOG, ("h%d gen%d region %Ix(%Ix) gen is %d!", heap_number, gen_number, seg_in_gen, heap_segment_mem (seg_in_gen), heap_segment_gen_num (seg_in_gen))); FATAL_GC_ERROR(); } if (heap_segment_gen_num (seg_in_gen) != heap_segment_plan_gen_num (seg_in_gen)) { dprintf (REGIONS_LOG, ("h%d gen%d region %Ix(%Ix) gen is %d but plan gen is %d!!", heap_number, gen_number, seg_in_gen, heap_segment_mem (seg_in_gen), heap_segment_gen_num (seg_in_gen), heap_segment_plan_gen_num (seg_in_gen))); FATAL_GC_ERROR(); } } if (heap_segment_allocated (seg_in_gen) > heap_segment_reserved (seg_in_gen)) { dprintf (REGIONS_LOG, ("h%d gen%d region %Ix alloc %Ix > reserved %Ix!!", heap_number, gen_number, heap_segment_mem (seg_in_gen), heap_segment_allocated (seg_in_gen), heap_segment_reserved (seg_in_gen))); FATAL_GC_ERROR(); } prev_region_in_gen = seg_in_gen; num_regions_in_gen++; heap_segment* next_region = heap_segment_next (seg_in_gen); if (seg_in_gen == next_region) { dprintf (REGIONS_LOG, ("h%d gen%d region %Ix(%Ix) pointing to itself!!", heap_number, gen_number, seg_in_gen, heap_segment_mem (seg_in_gen))); FATAL_GC_ERROR(); } seg_in_gen = next_region; } if (num_regions_in_gen == 0) { dprintf (REGIONS_LOG, ("h%d gen%d has no regions!!", heap_number, gen_number)); FATAL_GC_ERROR(); } if (can_verify_tail && (tail_region != prev_region_in_gen)) { dprintf (REGIONS_LOG, ("h%d gen%d tail region is %Ix(%Ix), diff from last region %Ix(%Ix)!!", heap_number, gen_number, tail_region, heap_segment_mem (tail_region), prev_region_in_gen, heap_segment_mem (prev_region_in_gen))); FATAL_GC_ERROR(); } #endif //USE_REGIONS } inline bool is_user_alloc_gen (int gen_number) { return ((gen_number == soh_gen0) || (gen_number == loh_generation) || (gen_number == poh_generation)); } void gc_heap::verify_regions (bool can_verify_gen_num, bool concurrent_p) { #ifdef USE_REGIONS for (int i = 0; i < total_generation_count; i++) { bool can_verify_tail = (concurrent_p ? 
!is_user_alloc_gen (i) : true); verify_regions (i, can_verify_gen_num, can_verify_tail); } #endif //USE_REGIONS } BOOL gc_heap::check_need_card (uint8_t* child_obj, int gen_num_for_cards, uint8_t* low, uint8_t* high) { #ifdef USE_REGIONS return (get_region_gen_num (child_obj) < gen_num_for_cards); #else return ((child_obj < high) && (child_obj >= low)); #endif //USE_REGIONS } void gc_heap::enter_gc_lock_for_verify_heap() { #ifdef VERIFY_HEAP if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) { enter_spin_lock (&gc_heap::gc_lock); dprintf (SPINLOCK_LOG, ("enter gc_lock for verify_heap")); } #endif // VERIFY_HEAP } void gc_heap::leave_gc_lock_for_verify_heap() { #ifdef VERIFY_HEAP if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) { dprintf (SPINLOCK_LOG, ("leave gc_lock taken for verify_heap")); leave_spin_lock (&gc_heap::gc_lock); } #endif // VERIFY_HEAP } void gc_heap::verify_heap (BOOL begin_gc_p) { int heap_verify_level = static_cast<int>(GCConfig::GetHeapVerifyLevel()); #ifdef MULTIPLE_HEAPS t_join* current_join = &gc_t_join; #ifdef BACKGROUND_GC if (settings.concurrent && (bgc_thread_id.IsCurrentThread())) { // We always call verify_heap on entry of GC on the SVR GC threads. current_join = &bgc_t_join; } #endif //BACKGROUND_GC #endif //MULTIPLE_HEAPS #ifndef TRACE_GC UNREFERENCED_PARAMETER(begin_gc_p); #endif //!TRACE_GC #ifdef BACKGROUND_GC dprintf (2,("[%s]GC#%d(%s): Verifying heap - begin", (begin_gc_p ? "BEG" : "END"), VolatileLoad(&settings.gc_index), (settings.concurrent ? "BGC" : (gc_heap::background_running_p() ? "FGC" : "NGC")))); #else dprintf (2,("[%s]GC#%d: Verifying heap - begin", (begin_gc_p ? "BEG" : "END"), VolatileLoad(&settings.gc_index))); #endif //BACKGROUND_GC #ifndef MULTIPLE_HEAPS #ifndef USE_REGIONS if ((ephemeral_low != generation_allocation_start (generation_of (max_generation - 1))) || (ephemeral_high != heap_segment_reserved (ephemeral_heap_segment))) { FATAL_GC_ERROR(); } #endif //!USE_REGIONS #endif //MULTIPLE_HEAPS #ifdef BACKGROUND_GC //don't touch the memory because the program is allocating from it. if (!settings.concurrent) #endif //BACKGROUND_GC { if (!(heap_verify_level & GCConfig::HEAPVERIFY_NO_MEM_FILL)) { // 0xaa the unused portions of segments. 
for (int i = get_start_generation_index(); i < total_generation_count; i++) { generation* gen1 = generation_of (i); heap_segment* seg1 = heap_segment_rw (generation_start_segment (gen1)); while (seg1) { uint8_t* clear_start = heap_segment_allocated (seg1) - plug_skew; if (heap_segment_used (seg1) > clear_start) { dprintf (3, ("setting end of seg %Ix: [%Ix-[%Ix to 0xaa", heap_segment_mem (seg1), clear_start , heap_segment_used (seg1))); memset (heap_segment_allocated (seg1) - plug_skew, 0xaa, (heap_segment_used (seg1) - clear_start)); } seg1 = heap_segment_next_rw (seg1); } } } } #ifdef MULTIPLE_HEAPS current_join->join(this, gc_join_verify_copy_table); if (current_join->joined()) { // in concurrent GC, new segment could be allocated when GC is working so the card brick table might not be updated at this point for (int i = 0; i < n_heaps; i++) { //copy the card and brick tables if (g_gc_card_table != g_heaps[i]->card_table) { g_heaps[i]->copy_brick_card_table(); } } current_join->restart(); } #else if (g_gc_card_table != card_table) copy_brick_card_table(); #endif //MULTIPLE_HEAPS //verify that the generation structures makes sense { #ifdef USE_REGIONS verify_regions (true, settings.concurrent); #else //USE_REGIONS generation* gen = generation_of (max_generation); assert (generation_allocation_start (gen) == heap_segment_mem (heap_segment_rw (generation_start_segment (gen)))); int gen_num = max_generation-1; generation* prev_gen = gen; while (gen_num >= 0) { gen = generation_of (gen_num); assert (generation_allocation_segment (gen) == ephemeral_heap_segment); assert (generation_allocation_start (gen) >= heap_segment_mem (ephemeral_heap_segment)); assert (generation_allocation_start (gen) < heap_segment_allocated (ephemeral_heap_segment)); if (generation_start_segment (prev_gen ) == generation_start_segment (gen)) { assert (generation_allocation_start (prev_gen) < generation_allocation_start (gen)); } prev_gen = gen; gen_num--; } #endif //USE_REGIONS } size_t total_objects_verified = 0; size_t total_objects_verified_deep = 0; BOOL bCurrentBrickInvalid = FALSE; size_t last_valid_brick = 0; size_t curr_brick = 0; size_t prev_brick = (size_t)-1; int gen_num_for_cards = 0; #ifdef USE_REGIONS int gen_num_to_stop = 0; uint8_t* e_high = 0; uint8_t* next_boundary = 0; #else //USE_REGIONS // For no regions the gen number is seperately reduced when we detect the ephemeral seg. int gen_num_to_stop = max_generation; uint8_t* e_high = ephemeral_high; uint8_t* next_boundary = generation_allocation_start (generation_of (max_generation - 1)); uint8_t* begin_youngest = generation_allocation_start(generation_of(0)); #endif //!USE_REGIONS // go through all generations starting with the highest for (int curr_gen_num = total_generation_count - 1; curr_gen_num >= gen_num_to_stop; curr_gen_num--) { int align_const = get_alignment_constant (curr_gen_num == max_generation); BOOL large_brick_p = (curr_gen_num > max_generation); #ifdef USE_REGIONS gen_num_for_cards = ((curr_gen_num >= max_generation) ? 
max_generation : curr_gen_num); #endif //USE_REGIONS heap_segment* seg = heap_segment_in_range (generation_start_segment (generation_of (curr_gen_num) )); while (seg) { uint8_t* curr_object = heap_segment_mem (seg); uint8_t* prev_object = 0; #ifdef USE_REGIONS if (heap_segment_gen_num (seg) != heap_segment_plan_gen_num (seg)) { dprintf (1, ("Seg %Ix, gen num is %d, plan gen num is %d", heap_segment_mem (seg), heap_segment_gen_num (seg), heap_segment_plan_gen_num (seg))); FATAL_GC_ERROR(); } #endif //USE_REGIONS #ifdef BACKGROUND_GC BOOL consider_bgc_mark_p = FALSE; BOOL check_current_sweep_p = FALSE; BOOL check_saved_sweep_p = FALSE; should_check_bgc_mark (seg, &consider_bgc_mark_p, &check_current_sweep_p, &check_saved_sweep_p); #endif //BACKGROUND_GC while (curr_object < heap_segment_allocated (seg)) { if (is_mark_set (curr_object)) { dprintf (1, ("curr_object: %Ix is marked!",(size_t)curr_object)); FATAL_GC_ERROR(); } size_t s = size (curr_object); dprintf (3, ("o: %Ix, s: %d", (size_t)curr_object, s)); if (s == 0) { dprintf (1, ("Verifying Heap: size of current object %Ix == 0", curr_object)); FATAL_GC_ERROR(); } #ifndef USE_REGIONS // handle generation boundaries within ephemeral segment if (seg == ephemeral_heap_segment) { if ((curr_gen_num > 0) && (curr_object >= next_boundary)) { curr_gen_num--; if (curr_gen_num > 0) { next_boundary = generation_allocation_start (generation_of (curr_gen_num - 1)); } } } #endif //!USE_REGIONS #ifdef USE_REGIONS if (curr_gen_num != 0) #else // If object is not in the youngest generation, then lets // verify that the brick table is correct.... if (((seg != ephemeral_heap_segment) || (brick_of(curr_object) < brick_of(begin_youngest)))) #endif //USE_REGIONS { curr_brick = brick_of(curr_object); // Brick Table Verification... // // On brick transition // if brick is negative // verify that brick indirects to previous valid brick // else // set current brick invalid flag to be flipped if we // encounter an object at the correct place // if (curr_brick != prev_brick) { // If the last brick we were examining had positive // entry but we never found the matching object, then // we have a problem // If prev_brick was the last one of the segment // it's ok for it to be invalid because it is never looked at if (bCurrentBrickInvalid && (curr_brick != brick_of (heap_segment_mem (seg))) && !heap_segment_read_only_p (seg)) { dprintf (1, ("curr brick %Ix invalid", curr_brick)); FATAL_GC_ERROR(); } if (large_brick_p) { //large objects verify the table only if they are in //range. if ((heap_segment_reserved (seg) <= highest_address) && (heap_segment_mem (seg) >= lowest_address) && brick_table [curr_brick] != 0) { dprintf (1, ("curr_brick %Ix for large object %Ix is set to %Ix", curr_brick, (size_t)curr_object, (size_t)brick_table[curr_brick])); FATAL_GC_ERROR(); } else { bCurrentBrickInvalid = FALSE; } } else { // If the current brick contains a negative value make sure // that the indirection terminates at the last valid brick if (brick_table [curr_brick] <= 0) { if (brick_table [curr_brick] == 0) { dprintf(1, ("curr_brick %Ix for object %Ix set to 0", curr_brick, (size_t)curr_object)); FATAL_GC_ERROR(); } ptrdiff_t i = curr_brick; while ((i >= ((ptrdiff_t) brick_of (heap_segment_mem (seg)))) && (brick_table[i] < 0)) { i = i + brick_table[i]; } if (i < ((ptrdiff_t)(brick_of (heap_segment_mem (seg))) - 1)) { dprintf (1, ("ptrdiff i: %Ix < brick_of (heap_segment_mem (seg)):%Ix - 1. 
curr_brick: %Ix", i, brick_of (heap_segment_mem (seg)), curr_brick)); FATAL_GC_ERROR(); } bCurrentBrickInvalid = FALSE; } else if (!heap_segment_read_only_p (seg)) { bCurrentBrickInvalid = TRUE; } } } if (bCurrentBrickInvalid) { if (curr_object == (brick_address(curr_brick) + brick_table[curr_brick] - 1)) { bCurrentBrickInvalid = FALSE; last_valid_brick = curr_brick; } } } if (*((uint8_t**)curr_object) != (uint8_t *) g_gc_pFreeObjectMethodTable) { #ifdef FEATURE_LOH_COMPACTION if ((curr_gen_num == loh_generation) && (prev_object != 0)) { assert (method_table (prev_object) == g_gc_pFreeObjectMethodTable); } #endif //FEATURE_LOH_COMPACTION total_objects_verified++; BOOL can_verify_deep = TRUE; #ifdef BACKGROUND_GC can_verify_deep = fgc_should_consider_object (curr_object, seg, consider_bgc_mark_p, check_current_sweep_p, check_saved_sweep_p); #endif //BACKGROUND_GC BOOL deep_verify_obj = can_verify_deep; if ((heap_verify_level & GCConfig::HEAPVERIFY_DEEP_ON_COMPACT) && !settings.compaction) deep_verify_obj = FALSE; ((CObjectHeader*)curr_object)->ValidateHeap(deep_verify_obj); if (can_verify_deep) { if (curr_gen_num > 0) { BOOL need_card_p = FALSE; if (contain_pointers_or_collectible (curr_object)) { dprintf (4, ("curr_object: %Ix", (size_t)curr_object)); size_t crd = card_of (curr_object); BOOL found_card_p = card_set_p (crd); #ifdef COLLECTIBLE_CLASS if (is_collectible(curr_object)) { uint8_t* class_obj = get_class_object (curr_object); if (check_need_card (class_obj, gen_num_for_cards, next_boundary, e_high)) { if (!found_card_p) { dprintf (1, ("Card not set, curr_object = [%Ix:%Ix pointing to class object %Ix", card_of (curr_object), (size_t)curr_object, class_obj)); FATAL_GC_ERROR(); } } } #endif //COLLECTIBLE_CLASS if (contain_pointers(curr_object)) { go_through_object_nostart (method_table(curr_object), curr_object, s, oo, { if (crd != card_of ((uint8_t*)oo)) { crd = card_of ((uint8_t*)oo); found_card_p = card_set_p (crd); need_card_p = FALSE; } if (*oo && check_need_card (*oo, gen_num_for_cards, next_boundary, e_high)) { need_card_p = TRUE; } if (need_card_p && !found_card_p) { dprintf (1, ("Card not set, curr_object = [%Ix:%Ix, %Ix:%Ix[", card_of (curr_object), (size_t)curr_object, card_of (curr_object+Align(s, align_const)), (size_t)(curr_object+Align(s, align_const)))); FATAL_GC_ERROR(); } } ); } if (need_card_p && !found_card_p) { dprintf (1, ("Card not set, curr_object = [%Ix:%Ix, %Ix:%Ix[", card_of (curr_object), (size_t)curr_object, card_of (curr_object + Align(s, align_const)), (size_t)(curr_object + Align(s, align_const)))); FATAL_GC_ERROR(); } } } total_objects_verified_deep++; } } prev_object = curr_object; prev_brick = curr_brick; curr_object = curr_object + Align(s, align_const); if (curr_object < prev_object) { dprintf (1, ("overflow because of a bad object size: %Ix size %Ix", prev_object, s)); FATAL_GC_ERROR(); } } if (curr_object > heap_segment_allocated(seg)) { dprintf (1, ("Verifiying Heap: curr_object: %Ix > heap_segment_allocated (seg: %Ix) %Ix", (size_t)curr_object, (size_t)seg, heap_segment_allocated (seg))); FATAL_GC_ERROR(); } seg = heap_segment_next_in_range (seg); } } #ifdef BACKGROUND_GC dprintf (2, ("(%s)(%s)(%s) total_objects_verified is %Id, total_objects_verified_deep is %Id", (settings.concurrent ? "BGC" : (gc_heap::background_running_p () ? "FGC" : "NGC")), (begin_gc_p ? "BEG" : "END"), ((current_c_gc_state == c_gc_state_planning) ? 
"in plan" : "not in plan"), total_objects_verified, total_objects_verified_deep)); if (current_c_gc_state != c_gc_state_planning) { assert (total_objects_verified == total_objects_verified_deep); } #endif //BACKGROUND_GC verify_free_lists(); #ifdef FEATURE_PREMORTEM_FINALIZATION finalize_queue->CheckFinalizerObjects(); #endif // FEATURE_PREMORTEM_FINALIZATION { // to be consistent with handle table APIs pass a ScanContext* // to provide the heap number. the SC isn't complete though so // limit its scope to handle table verification. ScanContext sc; sc.thread_number = heap_number; GCScan::VerifyHandleTable(max_generation, max_generation, &sc); } #ifdef MULTIPLE_HEAPS current_join->join(this, gc_join_verify_objects_done); if (current_join->joined()) #endif //MULTIPLE_HEAPS { GCToEEInterface::VerifySyncTableEntry(); #ifdef MULTIPLE_HEAPS current_join->restart(); #endif //MULTIPLE_HEAPS } #ifdef BACKGROUND_GC if (settings.concurrent) { verify_mark_array_cleared(); } dprintf (2,("GC%d(%s): Verifying heap - end", VolatileLoad(&settings.gc_index), (settings.concurrent ? "BGC" : (gc_heap::background_running_p() ? "FGC" : "NGC")))); #else dprintf (2,("GC#d: Verifying heap - end", VolatileLoad(&settings.gc_index))); #endif //BACKGROUND_GC } #endif //VERIFY_HEAP void GCHeap::ValidateObjectMember (Object* obj) { #ifdef VERIFY_HEAP size_t s = size (obj); uint8_t* o = (uint8_t*)obj; go_through_object_cl (method_table (obj), o, s, oo, { uint8_t* child_o = *oo; if (child_o) { //dprintf (3, ("VOM: m: %Ix obj %Ix", (size_t)child_o, o)); MethodTable *pMT = method_table (child_o); assert(pMT); if (!pMT->SanityCheck()) { dprintf (1, ("Bad member of %Ix %Ix", (size_t)oo, (size_t)child_o)); FATAL_GC_ERROR(); } } } ); #endif // VERIFY_HEAP } HRESULT GCHeap::StaticShutdown() { deleteGCShadow(); GCScan::GcRuntimeStructuresValid (FALSE); // Cannot assert this, since we use SuspendEE as the mechanism to quiesce all // threads except the one performing the shutdown. // ASSERT( !GcInProgress ); // Guard against any more GC occurring and against any threads blocking // for GC to complete when the GC heap is gone. This fixes a race condition // where a thread in GC is destroyed as part of process destruction and // the remaining threads block for GC complete. 
//GCTODO //EnterAllocLock(); //Enter(); //EnterFinalizeLock(); //SetGCDone(); // during shutdown a lot of threads are suspended // on this event, we don't want to wake them up just yet //CloseHandle (WaitForGCEvent); //find out if the global card table hasn't been used yet uint32_t* ct = &g_gc_card_table[card_word (gcard_of (g_gc_lowest_address))]; if (card_table_refcount (ct) == 0) { destroy_card_table (ct); g_gc_card_table = nullptr; #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES g_gc_card_bundle_table = nullptr; #endif #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP SoftwareWriteWatch::StaticClose(); #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP } //destroy all segments on the standby list while(gc_heap::segment_standby_list != 0) { heap_segment* next_seg = heap_segment_next (gc_heap::segment_standby_list); #ifdef MULTIPLE_HEAPS (gc_heap::g_heaps[0])->delete_heap_segment (gc_heap::segment_standby_list, FALSE); #else //MULTIPLE_HEAPS pGenGCHeap->delete_heap_segment (gc_heap::segment_standby_list, FALSE); #endif //MULTIPLE_HEAPS gc_heap::segment_standby_list = next_seg; } #ifdef MULTIPLE_HEAPS for (int i = 0; i < gc_heap::n_heaps; i ++) { delete gc_heap::g_heaps[i]->vm_heap; //destroy pure GC stuff gc_heap::destroy_gc_heap (gc_heap::g_heaps[i]); } #else gc_heap::destroy_gc_heap (pGenGCHeap); #endif //MULTIPLE_HEAPS gc_heap::shutdown_gc(); return S_OK; } // Wait until a garbage collection is complete // returns NOERROR if wait was OK, other error code if failure. // WARNING: This will not undo the must complete state. If you are // in a must complete when you call this, you'd better know what you're // doing. #ifdef FEATURE_PREMORTEM_FINALIZATION static HRESULT AllocateCFinalize(CFinalize **pCFinalize) { *pCFinalize = new (nothrow) CFinalize(); if (*pCFinalize == NULL || !(*pCFinalize)->Initialize()) return E_OUTOFMEMORY; return S_OK; } #endif // FEATURE_PREMORTEM_FINALIZATION // init the instance heap HRESULT GCHeap::Init(size_t hn) { HRESULT hres = S_OK; #ifdef MULTIPLE_HEAPS if ((pGenGCHeap = gc_heap::make_gc_heap(this, (int)hn)) == 0) hres = E_OUTOFMEMORY; #else UNREFERENCED_PARAMETER(hn); if (!gc_heap::make_gc_heap()) hres = E_OUTOFMEMORY; #endif //MULTIPLE_HEAPS // Failed. 
return hres; } //System wide initialization HRESULT GCHeap::Initialize() { HRESULT hr = S_OK; qpf = (uint64_t)GCToOSInterface::QueryPerformanceFrequency(); qpf_ms = 1000.0 / (double)qpf; qpf_us = 1000.0 * 1000.0 / (double)qpf; g_gc_pFreeObjectMethodTable = GCToEEInterface::GetFreeObjectMethodTable(); g_num_processors = GCToOSInterface::GetTotalProcessorCount(); assert(g_num_processors != 0); gc_heap::total_physical_mem = (size_t)GCConfig::GetGCTotalPhysicalMemory(); if (gc_heap::total_physical_mem != 0) { gc_heap::is_restricted_physical_mem = true; #ifdef FEATURE_EVENT_TRACE gc_heap::physical_memory_from_config = (size_t)gc_heap::total_physical_mem; #endif //FEATURE_EVENT_TRACE } else { gc_heap::total_physical_mem = GCToOSInterface::GetPhysicalMemoryLimit (&gc_heap::is_restricted_physical_mem); } #ifdef USE_REGIONS gc_heap::regions_range = (size_t)GCConfig::GetGCRegionRange(); #endif //USE_REGIONS #ifdef HOST_64BIT gc_heap::heap_hard_limit = (size_t)GCConfig::GetGCHeapHardLimit(); gc_heap::heap_hard_limit_oh[soh] = (size_t)GCConfig::GetGCHeapHardLimitSOH(); gc_heap::heap_hard_limit_oh[loh] = (size_t)GCConfig::GetGCHeapHardLimitLOH(); gc_heap::heap_hard_limit_oh[poh] = (size_t)GCConfig::GetGCHeapHardLimitPOH(); gc_heap::use_large_pages_p = GCConfig::GetGCLargePages(); if (gc_heap::heap_hard_limit_oh[soh] || gc_heap::heap_hard_limit_oh[loh] || gc_heap::heap_hard_limit_oh[poh]) { if (!gc_heap::heap_hard_limit_oh[soh]) { return CLR_E_GC_BAD_HARD_LIMIT; } if (!gc_heap::heap_hard_limit_oh[loh]) { return CLR_E_GC_BAD_HARD_LIMIT; } gc_heap::heap_hard_limit = gc_heap::heap_hard_limit_oh[soh] + gc_heap::heap_hard_limit_oh[loh] + gc_heap::heap_hard_limit_oh[poh]; } else { uint32_t percent_of_mem_soh = (uint32_t)GCConfig::GetGCHeapHardLimitSOHPercent(); uint32_t percent_of_mem_loh = (uint32_t)GCConfig::GetGCHeapHardLimitLOHPercent(); uint32_t percent_of_mem_poh = (uint32_t)GCConfig::GetGCHeapHardLimitPOHPercent(); if (percent_of_mem_soh || percent_of_mem_loh || percent_of_mem_poh) { if ((percent_of_mem_soh <= 0) || (percent_of_mem_soh >= 100)) { return CLR_E_GC_BAD_HARD_LIMIT; } if ((percent_of_mem_loh <= 0) || (percent_of_mem_loh >= 100)) { return CLR_E_GC_BAD_HARD_LIMIT; } else if ((percent_of_mem_poh < 0) || (percent_of_mem_poh >= 100)) { return CLR_E_GC_BAD_HARD_LIMIT; } if ((percent_of_mem_soh + percent_of_mem_loh + percent_of_mem_poh) >= 100) { return CLR_E_GC_BAD_HARD_LIMIT; } gc_heap::heap_hard_limit_oh[soh] = (size_t)(gc_heap::total_physical_mem * (uint64_t)percent_of_mem_soh / (uint64_t)100); gc_heap::heap_hard_limit_oh[loh] = (size_t)(gc_heap::total_physical_mem * (uint64_t)percent_of_mem_loh / (uint64_t)100); gc_heap::heap_hard_limit_oh[poh] = (size_t)(gc_heap::total_physical_mem * (uint64_t)percent_of_mem_poh / (uint64_t)100); gc_heap::heap_hard_limit = gc_heap::heap_hard_limit_oh[soh] + gc_heap::heap_hard_limit_oh[loh] + gc_heap::heap_hard_limit_oh[poh]; } } if (gc_heap::heap_hard_limit_oh[soh] && (!gc_heap::heap_hard_limit_oh[poh]) && (!gc_heap::use_large_pages_p)) { return CLR_E_GC_BAD_HARD_LIMIT; } if (!(gc_heap::heap_hard_limit)) { uint32_t percent_of_mem = (uint32_t)GCConfig::GetGCHeapHardLimitPercent(); if ((percent_of_mem > 0) && (percent_of_mem < 100)) { gc_heap::heap_hard_limit = (size_t)(gc_heap::total_physical_mem * (uint64_t)percent_of_mem / (uint64_t)100); } } // If the hard limit is specified, the user is saying even if the process is already // running in a container, use this limit for the GC heap. 
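/* Illustrative sketch (hypothetical helper, not the real GCConfig API): how a percentage-based hard limit maps to bytes per object heap, following the validation rules above (each percentage must be strictly between 0 and 100, and the three percentages must sum to less than 100):

       #include <stdint.h>

       // Returns 0 for an out-of-range percentage, otherwise the per-heap byte limit.
       static uint64_t percent_to_bytes (uint64_t total_physical_mem, uint32_t percent)
       {
           if ((percent == 0) || (percent >= 100))
               return 0;
           return total_physical_mem * (uint64_t)percent / (uint64_t)100;
       }

   Example: on a 64 GB machine with SOH=50, LOH=20, POH=5 (sum 75 < 100), the limits come out to 32 GB, 12.8 GB and 3.2 GB, and heap_hard_limit is their sum. */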
if (gc_heap::heap_hard_limit) { #ifdef FEATURE_EVENT_TRACE gc_heap::hard_limit_config_p = true; #endif //FEATURE_EVENT_TRACE } else { if (gc_heap::is_restricted_physical_mem) { uint64_t physical_mem_for_gc = gc_heap::total_physical_mem * (uint64_t)75 / (uint64_t)100; gc_heap::heap_hard_limit = (size_t)max ((20 * 1024 * 1024), physical_mem_for_gc); } } if ((!gc_heap::heap_hard_limit) && gc_heap::use_large_pages_p) { return CLR_E_GC_LARGE_PAGE_MISSING_HARD_LIMIT; } #endif //HOST_64BIT uint32_t nhp = 1; uint32_t nhp_from_config = 0; #ifdef MULTIPLE_HEAPS AffinitySet config_affinity_set; GCConfigStringHolder cpu_index_ranges_holder(GCConfig::GetGCHeapAffinitizeRanges()); if (!ParseGCHeapAffinitizeRanges(cpu_index_ranges_holder.Get(), &config_affinity_set)) { return CLR_E_GC_BAD_AFFINITY_CONFIG_FORMAT; } uintptr_t config_affinity_mask = static_cast<uintptr_t>(GCConfig::GetGCHeapAffinitizeMask()); const AffinitySet* process_affinity_set = GCToOSInterface::SetGCThreadsAffinitySet(config_affinity_mask, &config_affinity_set); if (process_affinity_set->IsEmpty()) { return CLR_E_GC_BAD_AFFINITY_CONFIG; } if ((cpu_index_ranges_holder.Get() != nullptr) #ifdef TARGET_WINDOWS || (config_affinity_mask != 0) #endif ) { affinity_config_specified_p = true; } nhp_from_config = static_cast<uint32_t>(GCConfig::GetHeapCount()); g_num_active_processors = GCToEEInterface::GetCurrentProcessCpuCount(); if (nhp_from_config) { // Even when the user specifies a heap count, it should not be more // than the number of procs this process can use. nhp_from_config = min (nhp_from_config, g_num_active_processors); } nhp = ((nhp_from_config == 0) ? g_num_active_processors : nhp_from_config); nhp = min (nhp, MAX_SUPPORTED_CPUS); gc_heap::gc_thread_no_affinitize_p = (gc_heap::heap_hard_limit ? !affinity_config_specified_p : (GCConfig::GetNoAffinitize() != 0)); if (!(gc_heap::gc_thread_no_affinitize_p)) { uint32_t num_affinitized_processors = (uint32_t)process_affinity_set->Count(); if (num_affinitized_processors != 0) { nhp = min(nhp, num_affinitized_processors); } } #endif //MULTIPLE_HEAPS size_t seg_size = 0; size_t large_seg_size = 0; size_t pin_seg_size = 0; #ifndef USE_REGIONS if (gc_heap::heap_hard_limit) { if (gc_heap::heap_hard_limit_oh[soh]) { #ifdef MULTIPLE_HEAPS if (nhp_from_config == 0) { for (int i = 0; i < (total_oh_count - 1); i++) { if (i == poh && gc_heap::heap_hard_limit_oh[poh] == 0) { // if size 0 was specified for POH, ignore it for the nhp computation continue; } uint32_t nhp_oh = (uint32_t)(gc_heap::heap_hard_limit_oh[i] / min_segment_size_hard_limit); nhp = min (nhp, nhp_oh); } if (nhp == 0) { nhp = 1; } } #endif seg_size = gc_heap::heap_hard_limit_oh[soh] / nhp; large_seg_size = gc_heap::heap_hard_limit_oh[loh] / nhp; pin_seg_size = (gc_heap::heap_hard_limit_oh[poh] != 0) ? (gc_heap::heap_hard_limit_oh[2] / nhp) : min_segment_size_hard_limit; size_t aligned_seg_size = align_on_segment_hard_limit (seg_size); size_t aligned_large_seg_size = align_on_segment_hard_limit (large_seg_size); size_t aligned_pin_seg_size = align_on_segment_hard_limit (pin_seg_size); if (!gc_heap::use_large_pages_p) { aligned_seg_size = round_up_power2 (aligned_seg_size); aligned_large_seg_size = round_up_power2 (aligned_large_seg_size); aligned_pin_seg_size = round_up_power2 (aligned_pin_seg_size); } size_t seg_size_from_config = (size_t)GCConfig::GetSegmentSize(); if (seg_size_from_config) { size_t aligned_seg_size_config = (gc_heap::use_large_pages_p ? 
align_on_segment_hard_limit (seg_size) : round_up_power2 (seg_size_from_config)); aligned_seg_size = max (aligned_seg_size, aligned_seg_size_config); aligned_large_seg_size = max (aligned_large_seg_size, aligned_seg_size_config); aligned_pin_seg_size = max (aligned_pin_seg_size, aligned_seg_size_config); } seg_size = aligned_seg_size; gc_heap::soh_segment_size = seg_size; large_seg_size = aligned_large_seg_size; pin_seg_size = aligned_pin_seg_size; } else { seg_size = gc_heap::get_segment_size_hard_limit (&nhp, (nhp_from_config == 0)); gc_heap::soh_segment_size = seg_size; large_seg_size = gc_heap::use_large_pages_p ? seg_size : seg_size * 2; pin_seg_size = large_seg_size; } if (gc_heap::use_large_pages_p) gc_heap::min_segment_size = min_segment_size_hard_limit; } else { seg_size = get_valid_segment_size(); gc_heap::soh_segment_size = seg_size; large_seg_size = get_valid_segment_size (TRUE); pin_seg_size = large_seg_size; } assert (g_theGCHeap->IsValidSegmentSize (seg_size)); assert (g_theGCHeap->IsValidSegmentSize (large_seg_size)); assert (g_theGCHeap->IsValidSegmentSize (pin_seg_size)); dprintf (1, ("%d heaps, soh seg size: %Id mb, loh: %Id mb\n", nhp, (seg_size / (size_t)1024 / 1024), (large_seg_size / 1024 / 1024))); gc_heap::min_uoh_segment_size = min (large_seg_size, pin_seg_size); if (gc_heap::min_segment_size == 0) { gc_heap::min_segment_size = min (seg_size, gc_heap::min_uoh_segment_size); } #endif //!USE_REGIONS #ifdef USE_REGIONS // REGIONS TODO: // soh_segment_size is used by a few places, I'm setting it temporarily and will // get rid of it. gc_heap::soh_segment_size = INITIAL_ALLOC; #ifdef MULTIPLE_HEAPS gc_heap::soh_segment_size /= 4; #endif //MULTIPLE_HEAPS size_t gc_region_size = (size_t)GCConfig::GetGCRegionSize(); if (!power_of_two_p(gc_region_size) || ((gc_region_size * nhp * 19) > gc_heap::regions_range)) { return E_OUTOFMEMORY; } gc_heap::min_segment_size_shr = index_of_highest_set_bit (gc_region_size); #else gc_heap::min_segment_size_shr = index_of_highest_set_bit (gc_heap::min_segment_size); #endif //USE_REGIONS #ifdef MULTIPLE_HEAPS gc_heap::n_heaps = nhp; hr = gc_heap::initialize_gc (seg_size, large_seg_size, pin_seg_size, nhp); #else hr = gc_heap::initialize_gc (seg_size, large_seg_size, pin_seg_size); #endif //MULTIPLE_HEAPS if (hr != S_OK) return hr; gc_heap::mem_one_percent = gc_heap::total_physical_mem / 100; #ifndef MULTIPLE_HEAPS gc_heap::mem_one_percent /= g_num_processors; #endif //!MULTIPLE_HEAPS uint32_t highmem_th_from_config = (uint32_t)GCConfig::GetGCHighMemPercent(); if (highmem_th_from_config) { gc_heap::high_memory_load_th = min (99, highmem_th_from_config); gc_heap::v_high_memory_load_th = min (99, (highmem_th_from_config + 7)); #ifdef FEATURE_EVENT_TRACE gc_heap::high_mem_percent_from_config = highmem_th_from_config; #endif //FEATURE_EVENT_TRACE } else { // We should only use this if we are in the "many process" mode which really is only applicable // to very powerful machines - before that's implemented, temporarily I am only enabling this for 80GB+ memory. // For now I am using an estimate to calculate these numbers but this should really be obtained // programmatically going forward. // I am assuming 47 processes using WKS GC and 3 using SVR GC. // I am assuming 3 in part due to the "very high memory load" is 97%. 
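/* Worked example for the heuristic below (numbers are illustrative; the constants 10, 3, 47 and the 80 GB cutoff come from the code that follows): available_mem_th starts at 10, i.e. high_memory_load_th defaults to 90. On a machine with at least 80 GB of physical memory and, say, 16 processors:

       adjusted_available_mem_th = 3 + (int)(47.0f / 16.0f) = 3 + 2 = 5
       available_mem_th          = min (10, 5)              = 5
       high_memory_load_th       = 100 - 5                  = 95

   On a very large machine (e.g. 112 processors) the fraction truncates to 0, giving an adjusted value of 3, so the threshold becomes 97, which matches v_high_memory_load_th. */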
int available_mem_th = 10; if (gc_heap::total_physical_mem >= ((uint64_t)80 * 1024 * 1024 * 1024)) { int adjusted_available_mem_th = 3 + (int)((float)47 / (float)(GCToOSInterface::GetTotalProcessorCount())); available_mem_th = min (available_mem_th, adjusted_available_mem_th); } gc_heap::high_memory_load_th = 100 - available_mem_th; gc_heap::v_high_memory_load_th = 97; } gc_heap::m_high_memory_load_th = min ((gc_heap::high_memory_load_th + 5), gc_heap::v_high_memory_load_th); gc_heap::pm_stress_on = (GCConfig::GetGCProvModeStress() != 0); #if defined(HOST_64BIT) gc_heap::youngest_gen_desired_th = gc_heap::mem_one_percent; #endif // HOST_64BIT WaitForGCEvent = new (nothrow) GCEvent; if (!WaitForGCEvent) { return E_OUTOFMEMORY; } if (!WaitForGCEvent->CreateManualEventNoThrow(TRUE)) { return E_FAIL; } #ifndef FEATURE_REDHAWK // Redhawk forces relocation a different way #if defined (STRESS_HEAP) && !defined (MULTIPLE_HEAPS) if (GCStress<cfg_any>::IsEnabled()) { for (int i = 0; i < GCHeap::NUM_HEAP_STRESS_OBJS; i++) { m_StressObjs[i] = CreateGlobalHandle(0); } m_CurStressObj = 0; } #endif //STRESS_HEAP && !MULTIPLE_HEAPS #endif // FEATURE_REDHAWK initGCShadow(); // If we are debugging write barriers, initialize heap shadow #ifdef MULTIPLE_HEAPS for (uint32_t i = 0; i < nhp; i++) { GCHeap* Hp = new (nothrow) GCHeap(); if (!Hp) return E_OUTOFMEMORY; if ((hr = Hp->Init (i))!= S_OK) { return hr; } } heap_select::init_numa_node_to_heap_map (nhp); // If we have more active processors than heaps we still want to initialize some of the // mapping for the rest of the active processors because user threads can still run on // them which means it's important to know their numa nodes and map them to a reasonable // heap, ie, we wouldn't want to have all such procs go to heap 0. if (g_num_active_processors > nhp) heap_select::distribute_other_procs(); gc_heap* hp = gc_heap::g_heaps[0]; dynamic_data* gen0_dd = hp->dynamic_data_of (0); gc_heap::min_gen0_balance_delta = (dd_min_size (gen0_dd) >> 3); #ifdef HEAP_BALANCE_INSTRUMENTATION cpu_group_enabled_p = GCToOSInterface::CanEnableGCCPUGroups(); if (!GCToOSInterface::GetNumaInfo (&total_numa_nodes_on_machine, &procs_per_numa_node)) { total_numa_nodes_on_machine = 1; // Note that if we are in cpu groups we need to take the way proc index is calculated // into consideration. It would mean we have more than 64 procs on one numa node - // this is mostly for testing (if we want to simulate no numa on a numa system). // see vm\gcenv.os.cpp GroupProcNo implementation. 
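/* Worked example for the CPU-group fallback below (illustrative numbers): with CPU groups enabled, proc indices are group-relative with a stride of 64, so a machine with 2 groups of 48 procs reports procs_per_cpu_group = 48 and total_cpu_groups_on_machine = 2, giving procs_per_numa_node = 48 + ((2 - 1) << 6) = 112, which is enough to cover the highest group-relative proc index (1 * 64 + 47 = 111). */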
if (GCToOSInterface::GetCPUGroupInfo (&total_cpu_groups_on_machine, &procs_per_cpu_group)) procs_per_numa_node = procs_per_cpu_group + ((total_cpu_groups_on_machine - 1) << 6); else procs_per_numa_node = g_num_processors; } hb_info_numa_nodes = new (nothrow) heap_balance_info_numa[total_numa_nodes_on_machine]; dprintf (HEAP_BALANCE_LOG, ("total: %d, numa: %d", g_num_processors, total_numa_nodes_on_machine)); int hb_info_size_per_proc = sizeof (heap_balance_info_proc); for (int numa_node_index = 0; numa_node_index < total_numa_nodes_on_machine; numa_node_index++) { int hb_info_size_per_node = hb_info_size_per_proc * procs_per_numa_node; uint8_t* numa_mem = (uint8_t*)GCToOSInterface::VirtualReserve (hb_info_size_per_node, 0, 0, numa_node_index); if (!numa_mem) return E_FAIL; if (!GCToOSInterface::VirtualCommit (numa_mem, hb_info_size_per_node, numa_node_index)) return E_FAIL; heap_balance_info_proc* hb_info_procs = (heap_balance_info_proc*)numa_mem; hb_info_numa_nodes[numa_node_index].hb_info_procs = hb_info_procs; for (int proc_index = 0; proc_index < (int)procs_per_numa_node; proc_index++) { heap_balance_info_proc* hb_info_proc = &hb_info_procs[proc_index]; hb_info_proc->count = default_max_hb_heap_balance_info; hb_info_proc->index = 0; } } #endif //HEAP_BALANCE_INSTRUMENTATION #else hr = Init (0); #endif //MULTIPLE_HEAPS #ifdef USE_REGIONS if (initial_regions) { delete[] initial_regions; } #endif //USE_REGIONS if (hr == S_OK) { GCScan::GcRuntimeStructuresValid (TRUE); GCToEEInterface::DiagUpdateGenerationBounds(); #if defined(STRESS_REGIONS) && defined(FEATURE_BASICFREEZE) #ifdef MULTIPLE_HEAPS gc_heap* hp = gc_heap::g_heaps[0]; #else gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS // allocate some artificial ro seg datastructures. for (int i = 0; i < 2; i++) { size_t ro_seg_size = 1024 * 1024; // I'm not allocating this within the normal reserved range // because ro segs are supposed to always be out of range // for regions. uint8_t* seg_mem = new (nothrow) uint8_t [ro_seg_size]; heap_segment* ro_seg = (heap_segment*) seg_mem; uint8_t* start = seg_mem + gc_heap::segment_info_size; heap_segment_mem (ro_seg) = start; heap_segment_used (ro_seg) = start; heap_segment_reserved (ro_seg) = seg_mem + ro_seg_size; heap_segment_committed (ro_seg) = heap_segment_reserved (ro_seg); gc_heap::init_heap_segment (ro_seg, hp, seg_mem, ro_seg_size, 2); ro_seg->flags = heap_segment_flags_readonly; hp->insert_ro_segment (ro_seg); } #endif //STRESS_REGIONS && FEATURE_BASICFREEZE } return hr; } //// // GC callback functions bool GCHeap::IsPromoted(Object* object) { #ifdef _DEBUG if (object) { ((CObjectHeader*)object)->Validate(); } #endif //_DEBUG uint8_t* o = (uint8_t*)object; if (gc_heap::settings.condemned_generation == max_generation) { #ifdef MULTIPLE_HEAPS gc_heap* hp = gc_heap::g_heaps[0]; #else gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS #ifdef BACKGROUND_GC if (gc_heap::settings.concurrent) { bool is_marked = (!((o < hp->background_saved_highest_address) && (o >= hp->background_saved_lowest_address))|| hp->background_marked (o)); return is_marked; } else #endif //BACKGROUND_GC { return (!((o < hp->highest_address) && (o >= hp->lowest_address)) || hp->is_mark_set (o)); } } else { #ifdef USE_REGIONS return (is_in_heap_range (o) ? (gc_heap::is_in_condemned_gc (o) ? 
gc_heap::is_mark_set (o) : true) : true); #else gc_heap* hp = gc_heap::heap_of (o); return (!((o < hp->gc_high) && (o >= hp->gc_low)) || hp->is_mark_set (o)); #endif //USE_REGIONS } } size_t GCHeap::GetPromotedBytes(int heap_index) { #ifdef BACKGROUND_GC if (gc_heap::settings.concurrent) { return gc_heap::bpromoted_bytes (heap_index); } else #endif //BACKGROUND_GC { gc_heap* hp = #ifdef MULTIPLE_HEAPS gc_heap::g_heaps[heap_index]; #else pGenGCHeap; #endif //MULTIPLE_HEAPS return hp->get_promoted_bytes(); } } void GCHeap::SetYieldProcessorScalingFactor (float scalingFactor) { assert (yp_spin_count_unit != 0); int saved_yp_spin_count_unit = yp_spin_count_unit; yp_spin_count_unit = (int)((float)yp_spin_count_unit * scalingFactor / (float)9); // It's very suspicious if it becomes 0 if (yp_spin_count_unit == 0) { yp_spin_count_unit = saved_yp_spin_count_unit; } } unsigned int GCHeap::WhichGeneration (Object* object) { uint8_t* o = (uint8_t*)object; #ifdef FEATURE_BASICFREEZE if (!((o < g_gc_highest_address) && (o >= g_gc_lowest_address))) { return max_generation; } #endif //FEATURE_BASICFREEZE gc_heap* hp = gc_heap::heap_of (o); unsigned int g = hp->object_gennum (o); dprintf (3, ("%Ix is in gen %d", (size_t)object, g)); return g; } unsigned int GCHeap::GetGenerationWithRange (Object* object, uint8_t** ppStart, uint8_t** ppAllocated, uint8_t** ppReserved) { int generation = -1; heap_segment * hs = gc_heap::find_segment ((uint8_t*)object, FALSE); #ifdef USE_REGIONS generation = heap_segment_gen_num (hs); if (generation == max_generation) { if (heap_segment_loh_p (hs)) { generation = loh_generation; } else if (heap_segment_poh_p (hs)) { generation = poh_generation; } } *ppStart = heap_segment_mem (hs); *ppAllocated = heap_segment_allocated (hs); *ppReserved = heap_segment_reserved (hs); #else #ifdef MULTIPLE_HEAPS gc_heap* hp = heap_segment_heap (hs); #else gc_heap* hp = __this; #endif //MULTIPLE_HEAPS if (hs == hp->ephemeral_heap_segment) { uint8_t* reserved = heap_segment_reserved (hs); uint8_t* end = heap_segment_allocated(hs); for (int gen = 0; gen < max_generation; gen++) { uint8_t* start = generation_allocation_start (hp->generation_of (gen)); if ((uint8_t*)object >= start) { generation = gen; *ppStart = start; *ppAllocated = end; *ppReserved = reserved; break; } end = reserved = start; } if (generation == -1) { generation = max_generation; *ppStart = heap_segment_mem (hs); *ppAllocated = *ppReserved = generation_allocation_start (hp->generation_of (max_generation - 1)); } } else { generation = max_generation; if (heap_segment_loh_p (hs)) { generation = loh_generation; } else if (heap_segment_poh_p (hs)) { generation = poh_generation; } *ppStart = heap_segment_mem (hs); *ppAllocated = heap_segment_allocated (hs); *ppReserved = heap_segment_reserved (hs); } #endif //USE_REGIONS return (unsigned int)generation; } bool GCHeap::IsEphemeral (Object* object) { uint8_t* o = (uint8_t*)object; gc_heap* hp = gc_heap::heap_of (o); return !!hp->ephemeral_pointer_p (o); } // Return NULL if can't find next object. 
When EE is not suspended, // the result is not accurate: if the input arg is in gen0, the function could // return zeroed out memory as next object Object * GCHeap::NextObj (Object * object) { #ifdef VERIFY_HEAP uint8_t* o = (uint8_t*)object; #ifndef FEATURE_BASICFREEZE if (!((o < g_gc_highest_address) && (o >= g_gc_lowest_address))) { return NULL; } #endif //!FEATURE_BASICFREEZE heap_segment * hs = gc_heap::find_segment (o, FALSE); if (!hs) { return NULL; } BOOL large_object_p = heap_segment_uoh_p (hs); if (large_object_p) return NULL; //could be racing with another core allocating. #ifdef MULTIPLE_HEAPS gc_heap* hp = heap_segment_heap (hs); #else //MULTIPLE_HEAPS gc_heap* hp = 0; #endif //MULTIPLE_HEAPS #ifdef USE_REGIONS unsigned int g = heap_segment_gen_num (hs); #else unsigned int g = hp->object_gennum ((uint8_t*)object); #endif if ((g == 0) && hp->settings.demotion) return NULL;//could be racing with another core allocating. int align_const = get_alignment_constant (!large_object_p); uint8_t* nextobj = o + Align (size (o), align_const); if (nextobj <= o) // either overflow or 0 sized object. { return NULL; } if ((nextobj < heap_segment_mem(hs)) || (nextobj >= heap_segment_allocated(hs) && hs != hp->ephemeral_heap_segment) || (nextobj >= hp->alloc_allocated)) { return NULL; } return (Object *)nextobj; #else return nullptr; #endif // VERIFY_HEAP } // returns TRUE if the pointer is in one of the GC heaps. bool GCHeap::IsHeapPointer (void* vpObject, bool small_heap_only) { uint8_t* object = (uint8_t*) vpObject; #ifndef FEATURE_BASICFREEZE if (!((object < g_gc_highest_address) && (object >= g_gc_lowest_address))) return FALSE; #endif //!FEATURE_BASICFREEZE heap_segment * hs = gc_heap::find_segment (object, small_heap_only); return !!hs; } void GCHeap::Promote(Object** ppObject, ScanContext* sc, uint32_t flags) { THREAD_NUMBER_FROM_CONTEXT; #ifndef MULTIPLE_HEAPS const int thread = 0; #endif //!MULTIPLE_HEAPS uint8_t* o = (uint8_t*)*ppObject; if (o == 0) return; #ifdef DEBUG_DestroyedHandleValue // we can race with destroy handle during concurrent scan if (o == (uint8_t*)DEBUG_DestroyedHandleValue) return; #endif //DEBUG_DestroyedHandleValue HEAP_FROM_THREAD; gc_heap* hp = gc_heap::heap_of (o); #ifdef USE_REGIONS if (!gc_heap::is_in_condemned (o)) #else //USE_REGIONS if ((o < hp->gc_low) || (o >= hp->gc_high)) #endif //USE_REGIONS { return; } dprintf (3, ("Promote %Ix", (size_t)o)); if (flags & GC_CALL_INTERIOR) { if ((o = hp->find_object (o)) == 0) { return; } } #ifdef FEATURE_CONSERVATIVE_GC // For conservative GC, a value on stack may point to middle of a free object. // In this case, we don't need to promote the pointer. if (GCConfig::GetConservativeGC() && ((CObjectHeader*)o)->IsFree()) { return; } #endif #ifdef _DEBUG ((CObjectHeader*)o)->Validate(); #else UNREFERENCED_PARAMETER(sc); #endif //_DEBUG if (flags & GC_CALL_PINNED) hp->pin_object (o, (uint8_t**) ppObject); #ifdef STRESS_PINNING if ((++n_promote % 20) == 1) hp->pin_object (o, (uint8_t**) ppObject); #endif //STRESS_PINNING hpt->mark_object_simple (&o THREAD_NUMBER_ARG); STRESS_LOG_ROOT_PROMOTE(ppObject, o, o ? 
header(o)->GetMethodTable() : NULL); } void GCHeap::Relocate (Object** ppObject, ScanContext* sc, uint32_t flags) { UNREFERENCED_PARAMETER(sc); uint8_t* object = (uint8_t*)(Object*)(*ppObject); THREAD_NUMBER_FROM_CONTEXT; //dprintf (3, ("Relocate location %Ix\n", (size_t)ppObject)); dprintf (3, ("R: %Ix", (size_t)ppObject)); if (!object || !((object >= g_gc_lowest_address) && (object < g_gc_highest_address))) return; gc_heap* hp = gc_heap::heap_of (object); #ifdef _DEBUG if (!(flags & GC_CALL_INTERIOR)) { // We cannot validate this object if it's in the condemned gen because it could // be one of the objects that were overwritten by an artificial gap due to a pinned plug. #ifdef USE_REGIONS if (!gc_heap::is_in_condemned_gc (object)) #else //USE_REGIONS if (!((object >= hp->gc_low) && (object < hp->gc_high))) #endif //USE_REGIONS { ((CObjectHeader*)object)->Validate(FALSE); } } #endif //_DEBUG dprintf (3, ("Relocate %Ix\n", (size_t)object)); uint8_t* pheader; if ((flags & GC_CALL_INTERIOR) && gc_heap::settings.loh_compaction) { #ifdef USE_REGIONS if (!gc_heap::is_in_condemned_gc (object)) #else //USE_REGIONS if (!((object >= hp->gc_low) && (object < hp->gc_high))) #endif //USE_REGIONS { return; } if (gc_heap::loh_object_p (object)) { pheader = hp->find_object (object); if (pheader == 0) { return; } ptrdiff_t ref_offset = object - pheader; hp->relocate_address(&pheader THREAD_NUMBER_ARG); *ppObject = (Object*)(pheader + ref_offset); return; } } { pheader = object; hp->relocate_address(&pheader THREAD_NUMBER_ARG); *ppObject = (Object*)pheader; } STRESS_LOG_ROOT_RELOCATE(ppObject, object, pheader, ((!(flags & GC_CALL_INTERIOR)) ? ((Object*)object)->GetGCSafeMethodTable() : 0)); } /*static*/ bool GCHeap::IsLargeObject(Object *pObj) { return size( pObj ) >= loh_size_threshold; } #ifndef FEATURE_REDHAWK // Redhawk forces relocation a different way #ifdef STRESS_HEAP void StressHeapDummy (); // CLRRandom implementation can produce FPU exceptions if // the test/application run by CLR is enabling any FPU exceptions. // We want to avoid any unexpected exception coming from stress // infrastructure, so CLRRandom is not an option. // The code below is a replicate of CRT rand() implementation. // Using CRT rand() is not an option because we will interfere with the user application // that may also use it. int StressRNG(int iMaxValue) { static BOOL bisRandInit = FALSE; static int lHoldrand = 1L; if (!bisRandInit) { lHoldrand = (int)time(NULL); bisRandInit = TRUE; } int randValue = (((lHoldrand = lHoldrand * 214013L + 2531011L) >> 16) & 0x7fff); return randValue % iMaxValue; } #endif // STRESS_HEAP #endif // !FEATURE_REDHAWK // free up object so that things will move and then do a GC //return TRUE if GC actually happens, otherwise FALSE bool GCHeap::StressHeap(gc_alloc_context * context) { #if defined(STRESS_HEAP) && !defined(FEATURE_REDHAWK) alloc_context* acontext = static_cast<alloc_context*>(context); assert(context != nullptr); // if GC stress was dynamically disabled during this run we return FALSE if (!GCStressPolicy::IsEnabled()) return FALSE; #ifdef _DEBUG if (g_pConfig->FastGCStressLevel() && !GCToEEInterface::GetThread()->StressHeapIsEnabled()) { return FALSE; } #endif //_DEBUG if ((g_pConfig->GetGCStressLevel() & EEConfig::GCSTRESS_UNIQUE) #ifdef _DEBUG || g_pConfig->FastGCStressLevel() > 1 #endif //_DEBUG ) { if (!Thread::UniqueStack(&acontext)) { return FALSE; } } #ifdef BACKGROUND_GC // don't trigger a GC from the GC threads but still trigger GCs from user threads. 
if (GCToEEInterface::WasCurrentThreadCreatedByGC()) { return FALSE; } #endif //BACKGROUND_GC if (g_pStringClass == 0) { // If the String class has not been loaded, dont do any stressing. This should // be kept to a minimum to get as complete coverage as possible. _ASSERTE(g_fEEInit); return FALSE; } #ifndef MULTIPLE_HEAPS static int32_t OneAtATime = -1; // Only bother with this if the stress level is big enough and if nobody else is // doing it right now. Note that some callers are inside the AllocLock and are // guaranteed synchronized. But others are using AllocationContexts and have no // particular synchronization. // // For this latter case, we want a very high-speed way of limiting this to one // at a time. A secondary advantage is that we release part of our StressObjs // buffer sparingly but just as effectively. if (Interlocked::Increment(&OneAtATime) == 0 && !TrackAllocations()) // Messing with object sizes can confuse the profiler (see ICorProfilerInfo::GetObjectSize) { StringObject* str; // If the current string is used up if (HndFetchHandle(m_StressObjs[m_CurStressObj]) == 0) { // Populate handles with strings int i = m_CurStressObj; while(HndFetchHandle(m_StressObjs[i]) == 0) { _ASSERTE(m_StressObjs[i] != 0); unsigned strLen = ((unsigned)loh_size_threshold - 32) / sizeof(WCHAR); unsigned strSize = PtrAlign(StringObject::GetSize(strLen)); // update the cached type handle before allocating SetTypeHandleOnThreadForAlloc(TypeHandle(g_pStringClass)); str = (StringObject*) pGenGCHeap->allocate (strSize, acontext, /*flags*/ 0); if (str) { str->SetMethodTable (g_pStringClass); str->SetStringLength (strLen); HndAssignHandle(m_StressObjs[i], ObjectToOBJECTREF(str)); } i = (i + 1) % NUM_HEAP_STRESS_OBJS; if (i == m_CurStressObj) break; } // advance the current handle to the next string m_CurStressObj = (m_CurStressObj + 1) % NUM_HEAP_STRESS_OBJS; } // Get the current string str = (StringObject*) OBJECTREFToObject(HndFetchHandle(m_StressObjs[m_CurStressObj])); if (str) { // Chop off the end of the string and form a new object out of it. // This will 'free' an object at the beginning of the heap, which will // force data movement. Note that we can only do this so many times. // before we have to move on to the next string. unsigned sizeOfNewObj = (unsigned)Align(min_obj_size * 31); if (str->GetStringLength() > sizeOfNewObj / sizeof(WCHAR)) { unsigned sizeToNextObj = (unsigned)Align(size(str)); uint8_t* freeObj = ((uint8_t*) str) + sizeToNextObj - sizeOfNewObj; pGenGCHeap->make_unused_array (freeObj, sizeOfNewObj); #if !defined(TARGET_AMD64) && !defined(TARGET_X86) // ensure that the write to the new free object is seen by // background GC *before* the write to the string length below MemoryBarrier(); #endif str->SetStringLength(str->GetStringLength() - (sizeOfNewObj / sizeof(WCHAR))); } else { // Let the string itself become garbage. 
// will be realloced next time around HndAssignHandle(m_StressObjs[m_CurStressObj], 0); } } } Interlocked::Decrement(&OneAtATime); #endif // !MULTIPLE_HEAPS if (IsConcurrentGCEnabled()) { int rgen = StressRNG(10); // gen0:gen1:gen2 distribution: 40:40:20 if (rgen >= 8) rgen = 2; else if (rgen >= 4) rgen = 1; else rgen = 0; GarbageCollectTry (rgen, FALSE, collection_gcstress); } else { GarbageCollect(max_generation, FALSE, collection_gcstress); } return TRUE; #else UNREFERENCED_PARAMETER(context); return FALSE; #endif //STRESS_HEAP && !FEATURE_REDHAWK } #ifdef FEATURE_PREMORTEM_FINALIZATION #define REGISTER_FOR_FINALIZATION(_object, _size) \ hp->finalize_queue->RegisterForFinalization (0, (_object), (_size)) #else // FEATURE_PREMORTEM_FINALIZATION #define REGISTER_FOR_FINALIZATION(_object, _size) true #endif // FEATURE_PREMORTEM_FINALIZATION #define CHECK_ALLOC_AND_POSSIBLY_REGISTER_FOR_FINALIZATION(_object, _size, _register) do { \ if ((_object) == NULL || ((_register) && !REGISTER_FOR_FINALIZATION(_object, _size))) \ { \ STRESS_LOG_OOM_STACK(_size); \ return NULL; \ } \ } while (false) #ifdef FEATURE_64BIT_ALIGNMENT // Allocate small object with an alignment requirement of 8-bytes. Object* AllocAlign8(alloc_context* acontext, gc_heap* hp, size_t size, uint32_t flags) { CONTRACTL { NOTHROW; GC_TRIGGERS; } CONTRACTL_END; Object* newAlloc = NULL; // Depending on where in the object the payload requiring 8-byte alignment resides we might have to // align the object header on an 8-byte boundary or midway between two such boundaries. The unaligned // case is indicated to the GC via the GC_ALLOC_ALIGN8_BIAS flag. size_t desiredAlignment = (flags & GC_ALLOC_ALIGN8_BIAS) ? 4 : 0; // Retrieve the address of the next allocation from the context (note that we're inside the alloc // lock at this point). uint8_t* result = acontext->alloc_ptr; // Will an allocation at this point yield the correct alignment and fit into the remainder of the // context? if ((((size_t)result & 7) == desiredAlignment) && ((result + size) <= acontext->alloc_limit)) { // Yes, we can just go ahead and make the allocation. newAlloc = (Object*) hp->allocate (size, acontext, flags); ASSERT(((size_t)newAlloc & 7) == desiredAlignment); } else { // No, either the next available address is not aligned in the way we require it or there's // not enough space to allocate an object of the required size. In both cases we allocate a // padding object (marked as a free object). This object's size is such that it will reverse // the alignment of the next header (asserted below). // // We allocate both together then decide based on the result whether we'll format the space as // free object + real object or real object + free object. ASSERT((Align(min_obj_size) & 7) == 4); CObjectHeader *freeobj = (CObjectHeader*) hp->allocate (Align(size) + Align(min_obj_size), acontext, flags); if (freeobj) { if (((size_t)freeobj & 7) == desiredAlignment) { // New allocation has desired alignment, return this one and place the free object at the // end of the allocated space. newAlloc = (Object*)freeobj; freeobj = (CObjectHeader*)((uint8_t*)freeobj + Align(size)); } else { // New allocation is still mis-aligned, format the initial space as a free object and the // rest of the space should be correctly aligned for the real object. newAlloc = (Object*)((uint8_t*)freeobj + Align(min_obj_size)); ASSERT(((size_t)newAlloc & 7) == desiredAlignment); if (flags & GC_ALLOC_ZEROING_OPTIONAL) { // clean the syncblock of the aligned object. 
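// Alignment note: Align (min_obj_size) is an odd multiple of 4 (see the ASSERT above), so
// prepending one minimal free object flips the 8-byte phase of the following header, which is
// exactly what makes newAlloc land on the desired alignment here. And because the caller asked
// for GC_ALLOC_ZEROING_OPTIONAL, the memory may still hold garbage, so the sync block slot (the
// pointer-sized word just before the object) is cleared by hand below.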
*(((PTR_PTR)newAlloc)-1) = 0; } } freeobj->SetFree(min_obj_size); } } return newAlloc; } #endif // FEATURE_64BIT_ALIGNMENT Object* GCHeap::Alloc(gc_alloc_context* context, size_t size, uint32_t flags REQD_ALIGN_DCL) { CONTRACTL { NOTHROW; GC_TRIGGERS; } CONTRACTL_END; TRIGGERSGC(); Object* newAlloc = NULL; alloc_context* acontext = static_cast<alloc_context*>(context); #ifdef MULTIPLE_HEAPS if (acontext->get_alloc_heap() == 0) { AssignHeap (acontext); assert (acontext->get_alloc_heap()); } gc_heap* hp = acontext->get_alloc_heap()->pGenGCHeap; #else gc_heap* hp = pGenGCHeap; #ifdef _PREFAST_ // prefix complains about us dereferencing hp in wks build even though we only access static members // this way. not sure how to shut it up except for this ugly workaround: PREFIX_ASSUME(hp != NULL); #endif //_PREFAST_ #endif //MULTIPLE_HEAPS assert(size < loh_size_threshold || (flags & GC_ALLOC_LARGE_OBJECT_HEAP)); if (flags & GC_ALLOC_USER_OLD_HEAP) { // The LOH always guarantees at least 8-byte alignment, regardless of platform. Moreover it doesn't // support mis-aligned object headers so we can't support biased headers. Luckily for us // we've managed to arrange things so the only case where we see a bias is for boxed value types and // these can never get large enough to be allocated on the LOH. ASSERT((flags & GC_ALLOC_ALIGN8_BIAS) == 0); ASSERT(65536 < loh_size_threshold); int gen_num = (flags & GC_ALLOC_PINNED_OBJECT_HEAP) ? poh_generation : loh_generation; newAlloc = (Object*) hp->allocate_uoh_object (size + ComputeMaxStructAlignPadLarge(requiredAlignment), flags, gen_num, acontext->alloc_bytes_uoh); ASSERT(((size_t)newAlloc & 7) == 0); #ifdef MULTIPLE_HEAPS if (flags & GC_ALLOC_FINALIZE) { // the heap may have changed due to heap balancing - it's important // to register the object for finalization on the heap it was allocated on hp = gc_heap::heap_of ((uint8_t*)newAlloc); } #endif //MULTIPLE_HEAPS #ifdef FEATURE_STRUCTALIGN newAlloc = (Object*) hp->pad_for_alignment_large ((uint8_t*) newAlloc, requiredAlignment, size); #endif // FEATURE_STRUCTALIGN } else { #ifdef FEATURE_64BIT_ALIGNMENT if (flags & GC_ALLOC_ALIGN8) { newAlloc = AllocAlign8 (acontext, hp, size, flags); } else #else assert ((flags & GC_ALLOC_ALIGN8) == 0); #endif { newAlloc = (Object*) hp->allocate (size + ComputeMaxStructAlignPad(requiredAlignment), acontext, flags); } #ifdef MULTIPLE_HEAPS if (flags & GC_ALLOC_FINALIZE) { // the heap may have changed due to heap balancing - it's important // to register the object for finalization on the heap it was allocated on hp = acontext->get_alloc_heap()->pGenGCHeap; assert ((newAlloc == nullptr) || (hp == gc_heap::heap_of ((uint8_t*)newAlloc))); } #endif //MULTIPLE_HEAPS #ifdef FEATURE_STRUCTALIGN newAlloc = (Object*) hp->pad_for_alignment ((uint8_t*) newAlloc, requiredAlignment, size, acontext); #endif // FEATURE_STRUCTALIGN } CHECK_ALLOC_AND_POSSIBLY_REGISTER_FOR_FINALIZATION(newAlloc, size, flags & GC_ALLOC_FINALIZE); #ifdef USE_REGIONS assert (IsHeapPointer (newAlloc)); #endif //USE_REGIONS return newAlloc; } void GCHeap::FixAllocContext (gc_alloc_context* context, void* arg, void *heap) { alloc_context* acontext = static_cast<alloc_context*>(context); #ifdef MULTIPLE_HEAPS if (arg != 0) acontext->alloc_count = 0; uint8_t * alloc_ptr = acontext->alloc_ptr; if (!alloc_ptr) return; // The acontext->alloc_heap can be out of sync with the ptrs because // of heap re-assignment in allocate gc_heap* hp = gc_heap::heap_of (alloc_ptr); #else gc_heap* hp = pGenGCHeap; #endif 
//MULTIPLE_HEAPS if (heap == NULL || heap == hp) { hp->fix_allocation_context (acontext, ((arg != 0)? TRUE : FALSE), TRUE); } } Object* GCHeap::GetContainingObject (void *pInteriorPtr, bool fCollectedGenOnly) { uint8_t *o = (uint8_t*)pInteriorPtr; gc_heap* hp = gc_heap::heap_of (o); #ifdef USE_REGIONS if (fCollectedGenOnly) { if (!gc_heap::is_in_condemned (o)) { return NULL; } } else { if (!((o >= g_gc_lowest_address) && (o < g_gc_highest_address))) return NULL; } #else //USE_REGIONS uint8_t* lowest = (fCollectedGenOnly ? hp->gc_low : hp->lowest_address); uint8_t* highest = (fCollectedGenOnly ? hp->gc_high : hp->highest_address); if (!((o >= lowest) && (o < highest))) { return NULL; } #endif //USE_REGIONS return (Object*)(hp->find_object (o)); } BOOL should_collect_optimized (dynamic_data* dd, BOOL low_memory_p) { if (dd_new_allocation (dd) < 0) { return TRUE; } if (((float)(dd_new_allocation (dd)) / (float)dd_desired_allocation (dd)) < (low_memory_p ? 0.7 : 0.3)) { return TRUE; } return FALSE; } //---------------------------------------------------------------------------- // #GarbageCollector // // API to ensure that a complete new garbage collection takes place // HRESULT GCHeap::GarbageCollect (int generation, bool low_memory_p, int mode) { #if defined(HOST_64BIT) if (low_memory_p) { size_t total_allocated = 0; size_t total_desired = 0; #ifdef MULTIPLE_HEAPS int hn = 0; for (hn = 0; hn < gc_heap::n_heaps; hn++) { gc_heap* hp = gc_heap::g_heaps [hn]; total_desired += dd_desired_allocation (hp->dynamic_data_of (0)); total_allocated += dd_desired_allocation (hp->dynamic_data_of (0))- dd_new_allocation (hp->dynamic_data_of (0)); } #else gc_heap* hp = pGenGCHeap; total_desired = dd_desired_allocation (hp->dynamic_data_of (0)); total_allocated = dd_desired_allocation (hp->dynamic_data_of (0))- dd_new_allocation (hp->dynamic_data_of (0)); #endif //MULTIPLE_HEAPS if ((total_desired > gc_heap::mem_one_percent) && (total_allocated < gc_heap::mem_one_percent)) { dprintf (2, ("Async low mem but we've only allocated %d (< 10%% of physical mem) out of %d, returning", total_allocated, total_desired)); return S_OK; } } #endif // HOST_64BIT #ifdef MULTIPLE_HEAPS gc_heap* hpt = gc_heap::g_heaps[0]; #else gc_heap* hpt = 0; #endif //MULTIPLE_HEAPS generation = (generation < 0) ? 
max_generation : min (generation, max_generation); dynamic_data* dd = hpt->dynamic_data_of (generation); #ifdef BACKGROUND_GC if (gc_heap::background_running_p()) { if ((mode == collection_optimized) || (mode & collection_non_blocking)) { return S_OK; } if (mode & collection_blocking) { pGenGCHeap->background_gc_wait(); if (mode & collection_optimized) { return S_OK; } } } #endif //BACKGROUND_GC if (mode & collection_optimized) { if (pGenGCHeap->gc_started) { return S_OK; } else { BOOL should_collect = FALSE; BOOL should_check_uoh = (generation == max_generation); #ifdef MULTIPLE_HEAPS for (int heap_number = 0; heap_number < gc_heap::n_heaps; heap_number++) { dynamic_data* dd1 = gc_heap::g_heaps [heap_number]->dynamic_data_of (generation); should_collect = should_collect_optimized (dd1, low_memory_p); if (should_check_uoh) { for (int i = uoh_start_generation; i < total_generation_count && !should_collect; i++) { should_collect = should_collect_optimized (gc_heap::g_heaps [heap_number]->dynamic_data_of (i), low_memory_p); } } if (should_collect) break; } #else should_collect = should_collect_optimized (dd, low_memory_p); if (should_check_uoh) { for (int i = uoh_start_generation; i < total_generation_count && !should_collect; i++) { should_collect = should_collect_optimized (hpt->dynamic_data_of (i), low_memory_p); } } #endif //MULTIPLE_HEAPS if (!should_collect) { return S_OK; } } } size_t CollectionCountAtEntry = dd_collection_count (dd); size_t BlockingCollectionCountAtEntry = gc_heap::full_gc_counts[gc_type_blocking]; size_t CurrentCollectionCount = 0; retry: CurrentCollectionCount = GarbageCollectTry(generation, low_memory_p, mode); if ((mode & collection_blocking) && (generation == max_generation) && (gc_heap::full_gc_counts[gc_type_blocking] == BlockingCollectionCountAtEntry)) { #ifdef BACKGROUND_GC if (gc_heap::background_running_p()) { pGenGCHeap->background_gc_wait(); } #endif //BACKGROUND_GC goto retry; } if (CollectionCountAtEntry == CurrentCollectionCount) { goto retry; } return S_OK; } size_t GCHeap::GarbageCollectTry (int generation, BOOL low_memory_p, int mode) { int gen = (generation < 0) ? max_generation : min (generation, max_generation); gc_reason reason = reason_empty; if (low_memory_p) { if (mode & collection_blocking) { reason = reason_lowmemory_blocking; } else { reason = reason_lowmemory; } } else { reason = reason_induced; } if (reason == reason_induced) { if (mode & collection_compacting) { reason = reason_induced_compacting; } else if (mode & collection_non_blocking) { reason = reason_induced_noforce; } #ifdef STRESS_HEAP else if (mode & collection_gcstress) { reason = reason_gcstress; } #endif } return GarbageCollectGeneration (gen, reason); } #ifdef BACKGROUND_GC void gc_heap::add_bgc_pause_duration_0() { if (settings.concurrent) { uint64_t suspended_end_ts = GetHighPrecisionTimeStamp(); size_t pause_duration = (size_t)(suspended_end_ts - suspended_start_time); last_recorded_gc_info* last_gc_info = &(last_bgc_info[last_bgc_info_index]); last_gc_info->pause_durations[0] = pause_duration; if (last_gc_info->index < last_ephemeral_gc_info.index) { last_gc_info->pause_durations[0] -= last_ephemeral_gc_info.pause_durations[0]; } total_suspended_time += last_gc_info->pause_durations[0]; } } last_recorded_gc_info* gc_heap::get_completed_bgc_info() { int completed_bgc_index = gc_heap::background_running_p() ? 
(int)(!(gc_heap::last_bgc_info_index)) : (int)gc_heap::last_bgc_info_index; return &gc_heap::last_bgc_info[completed_bgc_index]; } #endif //BACKGROUND_GC void gc_heap::do_pre_gc() { STRESS_LOG_GC_STACK; #ifdef STRESS_LOG STRESS_LOG_GC_START(VolatileLoad(&settings.gc_index), (uint32_t)settings.condemned_generation, (uint32_t)settings.reason); #endif // STRESS_LOG #ifdef MULTIPLE_HEAPS gc_heap* hp = g_heaps[0]; #else gc_heap* hp = 0; #endif //MULTIPLE_HEAPS #ifdef BACKGROUND_GC settings.b_state = hp->current_bgc_state; if (settings.concurrent) { last_bgc_info_index = !last_bgc_info_index; last_bgc_info[last_bgc_info_index].index = settings.gc_index; } #endif //BACKGROUND_GC #ifdef TRACE_GC size_t total_allocated_since_last_gc = get_total_allocated_since_last_gc(); #ifdef BACKGROUND_GC dprintf (1, (ThreadStressLog::gcDetailedStartMsg(), VolatileLoad(&settings.gc_index), dd_collection_count (hp->dynamic_data_of (0)), settings.condemned_generation, total_allocated_since_last_gc, (settings.concurrent ? "BGC" : (gc_heap::background_running_p() ? "FGC" : "NGC")), settings.b_state)); #else dprintf (1, ("*GC* %d(gen0:%d)(%d)(alloc: %Id)", VolatileLoad(&settings.gc_index), dd_collection_count(hp->dynamic_data_of(0)), settings.condemned_generation, total_allocated_since_last_gc)); #endif //BACKGROUND_GC if (heap_hard_limit) { size_t total_heap_committed = get_total_committed_size(); size_t total_heap_committed_recorded = current_total_committed - current_total_committed_bookkeeping; dprintf (1, ("(%d)GC commit BEG #%Id: %Id (recorded: %Id = %Id-%Id)", settings.condemned_generation, (size_t)settings.gc_index, total_heap_committed, total_heap_committed_recorded, current_total_committed, current_total_committed_bookkeeping)); } #endif //TRACE_GC GCHeap::UpdatePreGCCounters(); #if defined(__linux__) GCToEEInterface::UpdateGCEventStatus(static_cast<int>(GCEventStatus::GetEnabledLevel(GCEventProvider_Default)), static_cast<int>(GCEventStatus::GetEnabledKeywords(GCEventProvider_Default)), static_cast<int>(GCEventStatus::GetEnabledLevel(GCEventProvider_Private)), static_cast<int>(GCEventStatus::GetEnabledKeywords(GCEventProvider_Private))); #endif // __linux__ if (settings.concurrent) { #ifdef BACKGROUND_GC full_gc_counts[gc_type_background]++; #endif // BACKGROUND_GC } else { if (settings.condemned_generation == max_generation) { full_gc_counts[gc_type_blocking]++; } else { #ifdef BACKGROUND_GC if (settings.background_p) { ephemeral_fgc_counts[settings.condemned_generation]++; } #endif //BACKGROUND_GC } } } #ifdef GC_CONFIG_DRIVEN void gc_heap::record_interesting_info_per_heap() { // datapoints are always from the last blocking GC so don't record again // for BGCs. 
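// In other words, interesting_data_per_gc is only accumulated into interesting_data_per_heap
// for blocking GCs; a BGC still carries the numbers from the last blocking GC, so adding them
// again here would double count them.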
if (!(settings.concurrent)) { for (int i = 0; i < max_idp_count; i++) { interesting_data_per_heap[i] += interesting_data_per_gc[i]; } } int compact_reason = get_gc_data_per_heap()->get_mechanism (gc_heap_compact); if (compact_reason >= 0) (compact_reasons_per_heap[compact_reason])++; int expand_mechanism = get_gc_data_per_heap()->get_mechanism (gc_heap_expand); if (expand_mechanism >= 0) (expand_mechanisms_per_heap[expand_mechanism])++; for (int i = 0; i < max_gc_mechanism_bits_count; i++) { if (get_gc_data_per_heap()->is_mechanism_bit_set ((gc_mechanism_bit_per_heap)i)) (interesting_mechanism_bits_per_heap[i])++; } // h# | GC | gen | C | EX | NF | BF | ML | DM || PreS | PostS | Merge | Conv | Pre | Post | PrPo | PreP | PostP | cprintf (("%2d | %6d | %1d | %1s | %2s | %2s | %2s | %2s | %2s || %5Id | %5Id | %5Id | %5Id | %5Id | %5Id | %5Id | %5Id | %5Id |", heap_number, (size_t)settings.gc_index, settings.condemned_generation, // TEMP - I am just doing this for wks GC 'cause I wanna see the pattern of doing C/S GCs. (settings.compaction ? (((compact_reason >= 0) && gc_heap_compact_reason_mandatory_p[compact_reason]) ? "M" : "W") : ""), // compaction ((expand_mechanism >= 0)? "X" : ""), // EX ((expand_mechanism == expand_reuse_normal) ? "X" : ""), // NF ((expand_mechanism == expand_reuse_bestfit) ? "X" : ""), // BF (get_gc_data_per_heap()->is_mechanism_bit_set (gc_mark_list_bit) ? "X" : ""), // ML (get_gc_data_per_heap()->is_mechanism_bit_set (gc_demotion_bit) ? "X" : ""), // DM interesting_data_per_gc[idp_pre_short], interesting_data_per_gc[idp_post_short], interesting_data_per_gc[idp_merged_pin], interesting_data_per_gc[idp_converted_pin], interesting_data_per_gc[idp_pre_pin], interesting_data_per_gc[idp_post_pin], interesting_data_per_gc[idp_pre_and_post_pin], interesting_data_per_gc[idp_pre_short_padded], interesting_data_per_gc[idp_post_short_padded])); } void gc_heap::record_global_mechanisms() { for (int i = 0; i < max_global_mechanisms_count; i++) { if (gc_data_global.get_mechanism_p ((gc_global_mechanism_p)i)) { ::record_global_mechanism (i); } } } BOOL gc_heap::should_do_sweeping_gc (BOOL compact_p) { if (!compact_ratio) return (!compact_p); size_t compact_count = compact_or_sweep_gcs[0]; size_t sweep_count = compact_or_sweep_gcs[1]; size_t total_count = compact_count + sweep_count; BOOL should_compact = compact_p; if (total_count > 3) { if (compact_p) { int temp_ratio = (int)((compact_count + 1) * 100 / (total_count + 1)); if (temp_ratio > compact_ratio) { // cprintf (("compact would be: %d, total_count: %d, ratio would be %d%% > target\n", // (compact_count + 1), (total_count + 1), temp_ratio)); should_compact = FALSE; } } else { int temp_ratio = (int)((sweep_count + 1) * 100 / (total_count + 1)); if (temp_ratio > (100 - compact_ratio)) { // cprintf (("sweep would be: %d, total_count: %d, ratio would be %d%% > target\n", // (sweep_count + 1), (total_count + 1), temp_ratio)); should_compact = TRUE; } } } return !should_compact; } #endif //GC_CONFIG_DRIVEN #ifdef BGC_SERVO_TUNING // virtual_fl_size is only used for NGC2 void gc_heap::check_and_adjust_bgc_tuning (int gen_number, size_t physical_size, ptrdiff_t virtual_fl_size) { // For LOH we need to check more often to catch things like when the size grows too much. int min_gen_to_check = ((gen_number == max_generation) ? 
(max_generation - 1) : 0); if (settings.condemned_generation >= min_gen_to_check) { #ifdef MULTIPLE_HEAPS gc_heap* hp = g_heaps[0]; #else gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS size_t total_gen_size = physical_size; size_t total_generation_fl_size = get_total_generation_fl_size (gen_number); double gen_flr = (double)total_generation_fl_size * 100.0 / (double)total_gen_size; size_t gen1_index = dd_collection_count (hp->dynamic_data_of (max_generation - 1)); size_t gen2_index = dd_collection_count (hp->dynamic_data_of (max_generation)); bgc_tuning::tuning_calculation* current_gen_calc = &bgc_tuning::gen_calc[gen_number - max_generation]; bgc_tuning::tuning_stats* current_gen_stats = &bgc_tuning::gen_stats[gen_number - max_generation]; bool gen_size_inc_p = (total_gen_size > current_gen_calc->last_bgc_size); if ((settings.condemned_generation >= min_gen_to_check) && (settings.condemned_generation != max_generation)) { if (gen_size_inc_p) { current_gen_stats->last_gen_increase_flr = gen_flr; dprintf (BGC_TUNING_LOG, ("BTLp[g1: %Id, g2: %Id]: gen%d size inc %s %Id->%Id, flr: %.3f", gen1_index, gen2_index, gen_number, (gc_heap::background_running_p() ? "during bgc" : ""), current_gen_stats->last_bgc_physical_size, total_gen_size, gen_flr)); } if (!bgc_tuning::fl_tuning_triggered) { if (bgc_tuning::enable_fl_tuning) { if (!((gc_heap::background_running_p() || (hp->current_bgc_state == bgc_initialized)))) { assert (settings.entry_memory_load); // We start when we are 2/3 way there so we don't overshoot. if ((settings.entry_memory_load >= (bgc_tuning::memory_load_goal * 2 / 3)) && (full_gc_counts[gc_type_background] >= 2)) { bgc_tuning::next_bgc_p = true; current_gen_calc->first_alloc_to_trigger = get_total_servo_alloc (gen_number); dprintf (BGC_TUNING_LOG, ("BTL[g1: %Id] mem high enough: %d(goal: %d), gen%d fl alloc: %Id, trigger BGC!", gen1_index, settings.entry_memory_load, bgc_tuning::memory_load_goal, gen_number, current_gen_calc->first_alloc_to_trigger)); } } } } } if ((settings.condemned_generation == max_generation) && !(settings.concurrent)) { size_t total_survived = get_total_surv_size (gen_number); size_t total_begin = get_total_begin_data_size (gen_number); double current_gc_surv_rate = (double)total_survived * 100.0 / (double)total_begin; // calculate the adjusted gen_flr. 
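// The virtual free list size is folded into both the numerator and the denominator, i.e.
//   new_gen_flr = (fl + virtual_fl) * 100 / (physical_size + virtual_fl)
// so the ratio reflects the free space the servo tuning pretends exists, not just what is
// physically on the free list.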
double total_virtual_size = (double)physical_size + (double)virtual_fl_size; double total_fl_size = (double)total_generation_fl_size + (double)virtual_fl_size; double new_gen_flr = total_fl_size * 100.0 / total_virtual_size; dprintf (BGC_TUNING_LOG, ("BTL%d NGC2 size %Id->%Id, fl %Id(%.3f)->%Id(%.3f)", gen_number, physical_size, (size_t)total_virtual_size, total_generation_fl_size, gen_flr, (size_t)total_fl_size, new_gen_flr)); dprintf (BGC_TUNING_LOG, ("BTL%d* %Id, %.3f, %.3f, %.3f, %.3f, %.3f, %Id, %Id, %Id, %Id", gen_number, (size_t)total_virtual_size, 0.0, 0.0, new_gen_flr, current_gen_stats->last_gen_increase_flr, current_gc_surv_rate, 0, 0, 0, current_gen_calc->alloc_to_trigger)); bgc_tuning::gen1_index_last_bgc_end = gen1_index; current_gen_calc->last_bgc_size = total_gen_size; current_gen_calc->last_bgc_flr = new_gen_flr; current_gen_calc->last_sweep_above_p = false; current_gen_calc->last_bgc_end_alloc = 0; current_gen_stats->last_alloc_end_to_start = 0; current_gen_stats->last_alloc_start_to_sweep = 0; current_gen_stats->last_alloc_sweep_to_end = 0; current_gen_stats->last_bgc_fl_size = total_generation_fl_size; current_gen_stats->last_bgc_surv_rate = current_gc_surv_rate; current_gen_stats->last_gen_increase_flr = 0; } } } void gc_heap::get_and_reset_loh_alloc_info() { if (!bgc_tuning::enable_fl_tuning) return; total_loh_a_last_bgc = 0; uint64_t total_loh_a_no_bgc = 0; uint64_t total_loh_a_bgc_marking = 0; uint64_t total_loh_a_bgc_planning = 0; #ifdef MULTIPLE_HEAPS for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; #else //MULTIPLE_HEAPS { gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS total_loh_a_no_bgc += hp->loh_a_no_bgc; hp->loh_a_no_bgc = 0; total_loh_a_bgc_marking += hp->loh_a_bgc_marking; hp->loh_a_bgc_marking = 0; total_loh_a_bgc_planning += hp->loh_a_bgc_planning; hp->loh_a_bgc_planning = 0; } dprintf (2, ("LOH alloc: outside bgc: %I64d; bm: %I64d; bp: %I64d", total_loh_a_no_bgc, total_loh_a_bgc_marking, total_loh_a_bgc_planning)); total_loh_a_last_bgc = total_loh_a_no_bgc + total_loh_a_bgc_marking + total_loh_a_bgc_planning; } #endif //BGC_SERVO_TUNING bool gc_heap::is_pm_ratio_exceeded() { size_t maxgen_frag = 0; size_t maxgen_size = 0; size_t total_heap_size = get_total_heap_size(); #ifdef MULTIPLE_HEAPS for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; #else //MULTIPLE_HEAPS { gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS maxgen_frag += dd_fragmentation (hp->dynamic_data_of (max_generation)); maxgen_size += hp->generation_size (max_generation); } double maxgen_ratio = (double)maxgen_size / (double)total_heap_size; double maxgen_frag_ratio = (double)maxgen_frag / (double)maxgen_size; dprintf (GTC_LOG, ("maxgen %Id(%d%% total heap), frag: %Id (%d%% maxgen)", maxgen_size, (int)(maxgen_ratio * 100.0), maxgen_frag, (int)(maxgen_frag_ratio * 100.0))); bool maxgen_highfrag_p = ((maxgen_ratio > 0.5) && (maxgen_frag_ratio > 0.1)); // We need to adjust elevation here because if there's enough fragmentation it's not // unproductive. 
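// "High fragmentation" here means gen2 makes up more than half of the total heap and more than
// 10% of gen2 is fragmentation; in that case a full compacting GC is expected to be productive,
// so elevation locking is turned off below and provisional mode is allowed to kick in.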
if (maxgen_highfrag_p) { settings.should_lock_elevation = FALSE; dprintf (GTC_LOG, ("high frag gen2, turn off elevation")); } return maxgen_highfrag_p; } void gc_heap::update_recorded_gen_data (last_recorded_gc_info* gc_info) { memset (gc_info->gen_info, 0, sizeof (gc_info->gen_info)); #ifdef MULTIPLE_HEAPS for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; #else //MULTIPLE_HEAPS { gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS gc_history_per_heap* current_gc_data_per_heap = hp->get_gc_data_per_heap(); for (int gen_number = 0; gen_number < total_generation_count; gen_number++) { recorded_generation_info* recorded_info = &(gc_info->gen_info[gen_number]); gc_generation_data* data = &(current_gc_data_per_heap->gen_data[gen_number]); recorded_info->size_before += data->size_before; recorded_info->fragmentation_before += data->free_list_space_before + data->free_obj_space_before; recorded_info->size_after += data->size_after; recorded_info->fragmentation_after += data->free_list_space_after + data->free_obj_space_after; } } } void gc_heap::do_post_gc() { #ifdef MULTIPLE_HEAPS gc_heap* hp = g_heaps[0]; #else gc_heap* hp = 0; #endif //MULTIPLE_HEAPS GCToEEInterface::GcDone(settings.condemned_generation); GCToEEInterface::DiagGCEnd(VolatileLoad(&settings.gc_index), (uint32_t)settings.condemned_generation, (uint32_t)settings.reason, !!settings.concurrent); add_to_history(); uint32_t current_memory_load = 0; #ifdef BGC_SERVO_TUNING if (bgc_tuning::enable_fl_tuning) { uint64_t current_available_physical = 0; size_t gen2_physical_size = 0; size_t gen3_physical_size = 0; ptrdiff_t gen2_virtual_fl_size = 0; ptrdiff_t gen3_virtual_fl_size = 0; ptrdiff_t vfl_from_kp = 0; ptrdiff_t vfl_from_ki = 0; gen2_physical_size = get_total_generation_size (max_generation); gen3_physical_size = get_total_generation_size (loh_generation); get_memory_info (&current_memory_load, &current_available_physical); if ((settings.condemned_generation == max_generation) && !settings.concurrent) { double gen2_size_ratio = (double)gen2_physical_size / ((double)gen2_physical_size + (double)gen3_physical_size); double total_virtual_fl_size = bgc_tuning::calculate_ml_tuning (current_available_physical, true, &vfl_from_kp, &vfl_from_ki); gen2_virtual_fl_size = (ptrdiff_t)(total_virtual_fl_size * gen2_size_ratio); gen3_virtual_fl_size = (ptrdiff_t)(total_virtual_fl_size * (1.0 - gen2_size_ratio)); #ifdef SIMPLE_DPRINTF dprintf (BGC_TUNING_LOG, ("BTL: ml: %d (g: %d)(%s), a: %I64d (g: %I64d, elg: %Id+%Id=%Id, %Id+%Id=%Id), vfl: %Id=%Id+%Id(NGC2)", current_memory_load, bgc_tuning::memory_load_goal, ((current_available_physical > bgc_tuning::available_memory_goal) ? "above" : "below"), current_available_physical, bgc_tuning::available_memory_goal, gen2_physical_size, gen2_virtual_fl_size, (gen2_physical_size + gen2_virtual_fl_size), gen3_physical_size, gen3_virtual_fl_size, (gen3_physical_size + gen3_virtual_fl_size), (ptrdiff_t)total_virtual_fl_size, vfl_from_kp, vfl_from_ki)); #endif //SIMPLE_DPRINTF } check_and_adjust_bgc_tuning (max_generation, gen2_physical_size, gen2_virtual_fl_size); check_and_adjust_bgc_tuning (loh_generation, gen3_physical_size, gen3_virtual_fl_size); } #endif //BGC_SERVO_TUNING dprintf (1, (ThreadStressLog::gcDetailedEndMsg(), VolatileLoad(&settings.gc_index), dd_collection_count(hp->dynamic_data_of(0)), (size_t)(GetHighPrecisionTimeStamp() / 1000), settings.condemned_generation, (settings.concurrent ? "BGC" : (gc_heap::background_running_p() ? 
"FGC" : "NGC")), (settings.compaction ? "C" : "S"), (settings.promotion ? "P" : "S"), settings.entry_memory_load, current_memory_load)); // Now record the gc info. last_recorded_gc_info* last_gc_info = 0; #ifdef BACKGROUND_GC if (settings.concurrent) { last_gc_info = &last_bgc_info[last_bgc_info_index]; assert (last_gc_info->index == settings.gc_index); } else #endif //BACKGROUND_GC { last_gc_info = ((settings.condemned_generation == max_generation) ? &last_full_blocking_gc_info : &last_ephemeral_gc_info); last_gc_info->index = settings.gc_index; } size_t total_heap_committed = get_total_committed_size(); last_gc_info->total_committed = total_heap_committed; last_gc_info->promoted = get_total_promoted(); last_gc_info->pinned_objects = get_total_pinned_objects(); last_gc_info->finalize_promoted_objects = GCHeap::GetFinalizablePromotedCount(); if (!settings.concurrent) { // If it's a normal blocking GC with its own SuspendEE, we simply get the elapsed time recoreded // and add the time between SuspendEE start and GC start. dynamic_data* dd = hp->dynamic_data_of (settings.condemned_generation); uint64_t gc_start_ts = dd_time_clock (dd); size_t pause_duration = (size_t)(end_gc_time - dd_time_clock (dd)); #ifdef BACKGROUND_GC if ((hp->current_bgc_state != bgc_initialized) && (settings.reason != reason_pm_full_gc)) { pause_duration += (size_t)(gc_start_ts - suspended_start_time); } #endif //BACKGROUND_GC last_gc_info->pause_durations[0] = pause_duration; total_suspended_time += pause_duration; last_gc_info->pause_durations[1] = 0; } uint64_t total_process_time = end_gc_time - process_start_time; last_gc_info->pause_percentage = (float)(total_process_time ? ((double)total_suspended_time / (double)total_process_time * 100.0) : 0); update_recorded_gen_data (last_gc_info); last_gc_info->heap_size = get_total_heap_size(); last_gc_info->fragmentation = get_total_fragmentation(); if (settings.exit_memory_load != 0) last_gc_info->memory_load = settings.exit_memory_load; else if (settings.entry_memory_load != 0) last_gc_info->memory_load = settings.entry_memory_load; last_gc_info->condemned_generation = settings.condemned_generation; last_gc_info->compaction = settings.compaction; last_gc_info->concurrent = settings.concurrent; #ifdef BACKGROUND_GC is_last_recorded_bgc = settings.concurrent; #endif //BACKGROUND_GC #ifdef TRACE_GC if (heap_hard_limit) { size_t total_heap_committed_recorded = current_total_committed - current_total_committed_bookkeeping; dprintf (1, ("(%d)GC commit END #%Id: %Id (recorded: %Id=%Id-%Id), heap %Id, frag: %Id", settings.condemned_generation, (size_t)settings.gc_index, total_heap_committed, total_heap_committed_recorded, current_total_committed, current_total_committed_bookkeeping, last_gc_info->heap_size, last_gc_info->fragmentation)); } #endif //TRACE_GC // Note we only do this at the end of full blocking GCs because we do not want // to turn on this provisional mode during the middle of a BGC. 
if ((settings.condemned_generation == max_generation) && (!settings.concurrent)) { if (pm_stress_on) { size_t full_compacting_gc_count = full_gc_counts[gc_type_compacting]; if (provisional_mode_triggered) { uint64_t r = gc_rand::get_rand(10); if ((full_compacting_gc_count - provisional_triggered_gc_count) >= r) { provisional_mode_triggered = false; provisional_off_gc_count = full_compacting_gc_count; dprintf (GTC_LOG, ("%Id NGC2s when turned on, %Id NGCs since(%Id)", provisional_triggered_gc_count, (full_compacting_gc_count - provisional_triggered_gc_count), num_provisional_triggered)); } } else { uint64_t r = gc_rand::get_rand(5); if ((full_compacting_gc_count - provisional_off_gc_count) >= r) { provisional_mode_triggered = true; provisional_triggered_gc_count = full_compacting_gc_count; num_provisional_triggered++; dprintf (GTC_LOG, ("%Id NGC2s when turned off, %Id NGCs since(%Id)", provisional_off_gc_count, (full_compacting_gc_count - provisional_off_gc_count), num_provisional_triggered)); } } } else { if (provisional_mode_triggered) { if ((settings.entry_memory_load < high_memory_load_th) || !is_pm_ratio_exceeded()) { dprintf (GTC_LOG, ("turning off PM")); provisional_mode_triggered = false; } } else if ((settings.entry_memory_load >= high_memory_load_th) && is_pm_ratio_exceeded()) { dprintf (GTC_LOG, ("highmem && highfrag - turning on PM")); provisional_mode_triggered = true; num_provisional_triggered++; } } } GCHeap::UpdatePostGCCounters(); // We need to reinitialize the number of pinned objects because it's used in the GCHeapStats // event fired in GCHeap::UpdatePostGCCounters. For BGC, we will get that event following an // FGC's GCHeapStats and we wouldn't want that FGC's info to carry over to the BGC. reinit_pinned_objects(); #ifdef STRESS_LOG STRESS_LOG_GC_END(VolatileLoad(&settings.gc_index), (uint32_t)settings.condemned_generation, (uint32_t)settings.reason); #endif // STRESS_LOG #ifdef GC_CONFIG_DRIVEN if (!settings.concurrent) { if (settings.compaction) (compact_or_sweep_gcs[0])++; else (compact_or_sweep_gcs[1])++; } #ifdef MULTIPLE_HEAPS for (int i = 0; i < n_heaps; i++) g_heaps[i]->record_interesting_info_per_heap(); #else record_interesting_info_per_heap(); #endif //MULTIPLE_HEAPS record_global_mechanisms(); #endif //GC_CONFIG_DRIVEN if (mark_list_overflow) { grow_mark_list(); mark_list_overflow = false; } } unsigned GCHeap::GetGcCount() { return (unsigned int)VolatileLoad(&pGenGCHeap->settings.gc_index); } size_t GCHeap::GarbageCollectGeneration (unsigned int gen, gc_reason reason) { dprintf (2, ("triggered a GC!")); #ifdef MULTIPLE_HEAPS gc_heap* hpt = gc_heap::g_heaps[0]; #else gc_heap* hpt = 0; #endif //MULTIPLE_HEAPS bool cooperative_mode = true; dynamic_data* dd = hpt->dynamic_data_of (gen); size_t localCount = dd_collection_count (dd); enter_spin_lock (&gc_heap::gc_lock); dprintf (SPINLOCK_LOG, ("GC Egc")); ASSERT_HOLDING_SPIN_LOCK(&gc_heap::gc_lock); //don't trigger another GC if one was already in progress //while waiting for the lock { size_t col_count = dd_collection_count (dd); if (localCount != col_count) { #ifdef SYNCHRONIZATION_STATS gc_lock_contended++; #endif //SYNCHRONIZATION_STATS dprintf (SPINLOCK_LOG, ("no need GC Lgc")); leave_spin_lock (&gc_heap::gc_lock); // We don't need to release msl here 'cause this means a GC // has happened and would have release all msl's. 
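// (The collection count read before taking gc_lock acts as a version stamp: if it moved while
// this thread waited for the lock, another thread already did the collection on our behalf and
// the new count is simply returned.)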
return col_count; } } gc_heap::g_low_memory_status = (reason == reason_lowmemory) || (reason == reason_lowmemory_blocking) || (gc_heap::latency_level == latency_level_memory_footprint); gc_trigger_reason = reason; #ifdef MULTIPLE_HEAPS for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap::g_heaps[i]->reset_gc_done(); } #else gc_heap::reset_gc_done(); #endif //MULTIPLE_HEAPS gc_heap::gc_started = TRUE; { init_sync_log_stats(); #ifndef MULTIPLE_HEAPS cooperative_mode = gc_heap::enable_preemptive (); dprintf (2, ("Suspending EE")); gc_heap::suspended_start_time = GetHighPrecisionTimeStamp(); BEGIN_TIMING(suspend_ee_during_log); GCToEEInterface::SuspendEE(SUSPEND_FOR_GC); END_TIMING(suspend_ee_during_log); gc_heap::proceed_with_gc_p = gc_heap::should_proceed_with_gc(); gc_heap::disable_preemptive (cooperative_mode); if (gc_heap::proceed_with_gc_p) pGenGCHeap->settings.init_mechanisms(); else gc_heap::update_collection_counts_for_no_gc(); #endif //!MULTIPLE_HEAPS } unsigned int condemned_generation_number = gen; // We want to get a stack from the user thread that triggered the GC // instead of on the GC thread which is the case for Server GC. // But we are doing it for Workstation GC as well to be uniform. FIRE_EVENT(GCTriggered, static_cast<uint32_t>(reason)); #ifdef MULTIPLE_HEAPS GcCondemnedGeneration = condemned_generation_number; cooperative_mode = gc_heap::enable_preemptive (); BEGIN_TIMING(gc_during_log); gc_heap::ee_suspend_event.Set(); gc_heap::wait_for_gc_done(); END_TIMING(gc_during_log); gc_heap::disable_preemptive (cooperative_mode); condemned_generation_number = GcCondemnedGeneration; #else if (gc_heap::proceed_with_gc_p) { BEGIN_TIMING(gc_during_log); pGenGCHeap->garbage_collect (condemned_generation_number); if (gc_heap::pm_trigger_full_gc) { pGenGCHeap->garbage_collect_pm_full_gc(); } END_TIMING(gc_during_log); } #endif //MULTIPLE_HEAPS #ifdef BACKGROUND_GC // We are deciding whether we should fire the alloc wait end event here // because in begin_foreground we could be calling end_foreground // if we need to retry. if (gc_heap::alloc_wait_event_p) { hpt->fire_alloc_wait_event_end (awr_fgc_wait_for_bgc); gc_heap::alloc_wait_event_p = FALSE; } #endif //BACKGROUND_GC #ifndef MULTIPLE_HEAPS #ifdef BACKGROUND_GC if (!gc_heap::dont_restart_ee_p) #endif //BACKGROUND_GC { #ifdef BACKGROUND_GC gc_heap::add_bgc_pause_duration_0(); #endif //BACKGROUND_GC BEGIN_TIMING(restart_ee_during_log); GCToEEInterface::RestartEE(TRUE); END_TIMING(restart_ee_during_log); } #endif //!MULTIPLE_HEAPS #ifndef MULTIPLE_HEAPS process_sync_log_stats(); gc_heap::gc_started = FALSE; gc_heap::set_gc_done(); dprintf (SPINLOCK_LOG, ("GC Lgc")); leave_spin_lock (&gc_heap::gc_lock); #endif //!MULTIPLE_HEAPS #ifdef FEATURE_PREMORTEM_FINALIZATION GCToEEInterface::EnableFinalization(!pGenGCHeap->settings.concurrent && pGenGCHeap->settings.found_finalizers); #endif // FEATURE_PREMORTEM_FINALIZATION return dd_collection_count (dd); } size_t GCHeap::GetTotalBytesInUse () { #ifdef MULTIPLE_HEAPS //enumerate all the heaps and get their size. 
size_t tot_size = 0; for (int i = 0; i < gc_heap::n_heaps; i++) { GCHeap* Hp = gc_heap::g_heaps [i]->vm_heap; tot_size += Hp->ApproxTotalBytesInUse(); } return tot_size; #else return ApproxTotalBytesInUse(); #endif //MULTIPLE_HEAPS } // Get the total allocated bytes uint64_t GCHeap::GetTotalAllocatedBytes() { #ifdef MULTIPLE_HEAPS uint64_t total_alloc_bytes = 0; for (int i = 0; i < gc_heap::n_heaps; i++) { gc_heap* hp = gc_heap::g_heaps[i]; total_alloc_bytes += hp->total_alloc_bytes_soh; total_alloc_bytes += hp->total_alloc_bytes_uoh; } return total_alloc_bytes; #else return (pGenGCHeap->total_alloc_bytes_soh + pGenGCHeap->total_alloc_bytes_uoh); #endif //MULTIPLE_HEAPS } int GCHeap::CollectionCount (int generation, int get_bgc_fgc_count) { if (get_bgc_fgc_count != 0) { #ifdef BACKGROUND_GC if (generation == max_generation) { return (int)(gc_heap::full_gc_counts[gc_type_background]); } else { return (int)(gc_heap::ephemeral_fgc_counts[generation]); } #else return 0; #endif //BACKGROUND_GC } #ifdef MULTIPLE_HEAPS gc_heap* hp = gc_heap::g_heaps [0]; #else //MULTIPLE_HEAPS gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS if (generation > max_generation) return 0; else return (int)dd_collection_count (hp->dynamic_data_of (generation)); } size_t GCHeap::ApproxTotalBytesInUse(BOOL small_heap_only) { size_t totsize = 0; enter_spin_lock (&pGenGCHeap->gc_lock); // For gen0 it's a bit complicated because we are currently allocating in it. We get the fragmentation first // just so that we don't give a negative number for the resulting size. generation* gen = pGenGCHeap->generation_of (0); size_t gen0_frag = generation_free_list_space (gen) + generation_free_obj_space (gen); uint8_t* current_alloc_allocated = pGenGCHeap->alloc_allocated; heap_segment* current_eph_seg = pGenGCHeap->ephemeral_heap_segment; size_t gen0_size = 0; #ifdef USE_REGIONS heap_segment* gen0_seg = generation_start_segment (gen); while (gen0_seg) { uint8_t* end = in_range_for_segment (current_alloc_allocated, gen0_seg) ? current_alloc_allocated : heap_segment_allocated (gen0_seg); gen0_size += end - heap_segment_mem (gen0_seg); if (gen0_seg == current_eph_seg) { break; } gen0_seg = heap_segment_next (gen0_seg); } #else //USE_REGIONS // For segments ephemeral seg does not change. gen0_size = current_alloc_allocated - heap_segment_mem (current_eph_seg); #endif //USE_REGIONS totsize = gen0_size - gen0_frag; int stop_gen_index = max_generation; if (gc_heap::current_c_gc_state == c_gc_state_planning) { // During BGC sweep since we can be deleting SOH segments, we avoid walking the segment // list. 
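// Instead, the running total is reset to the SOH size recorded at the end of background mark
// (background_soh_size_end_mark) minus the oldest generation's free list and free object
// space, and stop_gen_index is decremented so the per-generation loop below leaves the oldest
// generation out.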
generation* oldest_gen = pGenGCHeap->generation_of (max_generation); totsize = pGenGCHeap->background_soh_size_end_mark - generation_free_list_space (oldest_gen) - generation_free_obj_space (oldest_gen); stop_gen_index--; } for (int i = (max_generation - 1); i <= stop_gen_index; i++) { generation* gen = pGenGCHeap->generation_of (i); totsize += pGenGCHeap->generation_size (i) - generation_free_list_space (gen) - generation_free_obj_space (gen); } if (!small_heap_only) { for (int i = uoh_start_generation; i < total_generation_count; i++) { generation* gen = pGenGCHeap->generation_of (i); totsize += pGenGCHeap->generation_size (i) - generation_free_list_space (gen) - generation_free_obj_space (gen); } } leave_spin_lock (&pGenGCHeap->gc_lock); return totsize; } #ifdef MULTIPLE_HEAPS void GCHeap::AssignHeap (alloc_context* acontext) { // Assign heap based on processor acontext->set_alloc_heap(GetHeap(heap_select::select_heap(acontext))); acontext->set_home_heap(acontext->get_alloc_heap()); } GCHeap* GCHeap::GetHeap (int n) { assert (n < gc_heap::n_heaps); return gc_heap::g_heaps[n]->vm_heap; } #endif //MULTIPLE_HEAPS bool GCHeap::IsThreadUsingAllocationContextHeap(gc_alloc_context* context, int thread_number) { alloc_context* acontext = static_cast<alloc_context*>(context); #ifdef MULTIPLE_HEAPS return ((acontext->get_home_heap() == GetHeap(thread_number)) || ((acontext->get_home_heap() == 0) && (thread_number == 0))); #else UNREFERENCED_PARAMETER(acontext); UNREFERENCED_PARAMETER(thread_number); return true; #endif //MULTIPLE_HEAPS } // Returns the number of processors required to trigger the use of thread based allocation contexts int GCHeap::GetNumberOfHeaps () { #ifdef MULTIPLE_HEAPS return gc_heap::n_heaps; #else return 1; #endif //MULTIPLE_HEAPS } /* in this way we spend extra time cycling through all the heaps while create the handle it ought to be changed by keeping alloc_context.home_heap as number (equals heap_number) */ int GCHeap::GetHomeHeapNumber () { #ifdef MULTIPLE_HEAPS gc_alloc_context* ctx = GCToEEInterface::GetAllocContext(); if (!ctx) { return 0; } GCHeap *hp = static_cast<alloc_context*>(ctx)->get_home_heap(); return (hp ? hp->pGenGCHeap->heap_number : 0); #else return 0; #endif //MULTIPLE_HEAPS } unsigned int GCHeap::GetCondemnedGeneration() { return gc_heap::settings.condemned_generation; } void GCHeap::GetMemoryInfo(uint64_t* highMemLoadThresholdBytes, uint64_t* totalAvailableMemoryBytes, uint64_t* lastRecordedMemLoadBytes, uint64_t* lastRecordedHeapSizeBytes, uint64_t* lastRecordedFragmentationBytes, uint64_t* totalCommittedBytes, uint64_t* promotedBytes, uint64_t* pinnedObjectCount, uint64_t* finalizationPendingCount, uint64_t* index, uint32_t* generation, uint32_t* pauseTimePct, bool* isCompaction, bool* isConcurrent, uint64_t* genInfoRaw, uint64_t* pauseInfoRaw, int kind) { last_recorded_gc_info* last_gc_info = 0; if ((gc_kind)kind == gc_kind_ephemeral) { last_gc_info = &gc_heap::last_ephemeral_gc_info; } else if ((gc_kind)kind == gc_kind_full_blocking) { last_gc_info = &gc_heap::last_full_blocking_gc_info; } #ifdef BACKGROUND_GC else if ((gc_kind)kind == gc_kind_background) { last_gc_info = gc_heap::get_completed_bgc_info(); } #endif //BACKGROUND_GC else { assert ((gc_kind)kind == gc_kind_any); #ifdef BACKGROUND_GC if (gc_heap::is_last_recorded_bgc) { last_gc_info = gc_heap::get_completed_bgc_info(); } else #endif //BACKGROUND_GC { last_gc_info = ((gc_heap::last_ephemeral_gc_info.index > gc_heap::last_full_blocking_gc_info.index) ? 
&gc_heap::last_ephemeral_gc_info : &gc_heap::last_full_blocking_gc_info); } } *highMemLoadThresholdBytes = (uint64_t) (((double)(gc_heap::high_memory_load_th)) / 100 * gc_heap::total_physical_mem); *totalAvailableMemoryBytes = gc_heap::heap_hard_limit != 0 ? gc_heap::heap_hard_limit : gc_heap::total_physical_mem; *lastRecordedMemLoadBytes = (uint64_t) (((double)(last_gc_info->memory_load)) / 100 * gc_heap::total_physical_mem); *lastRecordedHeapSizeBytes = last_gc_info->heap_size; *lastRecordedFragmentationBytes = last_gc_info->fragmentation; *totalCommittedBytes = last_gc_info->total_committed; *promotedBytes = last_gc_info->promoted; *pinnedObjectCount = last_gc_info->pinned_objects; *finalizationPendingCount = last_gc_info->finalize_promoted_objects; *index = last_gc_info->index; *generation = last_gc_info->condemned_generation; *pauseTimePct = (int)(last_gc_info->pause_percentage * 100); *isCompaction = last_gc_info->compaction; *isConcurrent = last_gc_info->concurrent; int genInfoIndex = 0; for (int i = 0; i < total_generation_count; i++) { genInfoRaw[genInfoIndex++] = last_gc_info->gen_info[i].size_before; genInfoRaw[genInfoIndex++] = last_gc_info->gen_info[i].fragmentation_before; genInfoRaw[genInfoIndex++] = last_gc_info->gen_info[i].size_after; genInfoRaw[genInfoIndex++] = last_gc_info->gen_info[i].fragmentation_after; } for (int i = 0; i < 2; i++) { // convert it to 100-ns units that TimeSpan needs. pauseInfoRaw[i] = (uint64_t)(last_gc_info->pause_durations[i]) * 10; } #ifdef _DEBUG if ((gc_kind)kind == gc_kind_ephemeral) { assert (last_gc_info->condemned_generation < max_generation); } else if ((gc_kind)kind == gc_kind_full_blocking) { assert (last_gc_info->condemned_generation == max_generation); assert (last_gc_info->concurrent == false); } #ifdef BACKGROUND_GC else if ((gc_kind)kind == gc_kind_background) { assert (last_gc_info->condemned_generation == max_generation); assert (last_gc_info->concurrent == true); } #endif //BACKGROUND_GC #endif //_DEBUG } uint32_t GCHeap::GetMemoryLoad() { uint32_t memory_load = 0; if (gc_heap::settings.exit_memory_load != 0) memory_load = gc_heap::settings.exit_memory_load; else if (gc_heap::settings.entry_memory_load != 0) memory_load = gc_heap::settings.entry_memory_load; return memory_load; } int GCHeap::GetGcLatencyMode() { return (int)(pGenGCHeap->settings.pause_mode); } int GCHeap::SetGcLatencyMode (int newLatencyMode) { if (gc_heap::settings.pause_mode == pause_no_gc) return (int)set_pause_mode_no_gc; gc_pause_mode new_mode = (gc_pause_mode)newLatencyMode; if (new_mode == pause_low_latency) { #ifndef MULTIPLE_HEAPS pGenGCHeap->settings.pause_mode = new_mode; #endif //!MULTIPLE_HEAPS } else if (new_mode == pause_sustained_low_latency) { #ifdef BACKGROUND_GC if (gc_heap::gc_can_use_concurrent) { pGenGCHeap->settings.pause_mode = new_mode; } #endif //BACKGROUND_GC } else { pGenGCHeap->settings.pause_mode = new_mode; } #ifdef BACKGROUND_GC if (gc_heap::background_running_p()) { // If we get here, it means we are doing an FGC. If the pause // mode was altered we will need to save it in the BGC settings. 
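// While the foreground GC runs, saved_bgc_settings carries the background GC's copy of the
// settings, which becomes live again when the BGC continues; mirroring the new pause mode into
// it keeps the change from being undone when that copy is restored.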
if (gc_heap::saved_bgc_settings.pause_mode != new_mode) { gc_heap::saved_bgc_settings.pause_mode = new_mode; } } #endif //BACKGROUND_GC return (int)set_pause_mode_success; } int GCHeap::GetLOHCompactionMode() { #ifdef FEATURE_LOH_COMPACTION return pGenGCHeap->loh_compaction_mode; #else return loh_compaction_default; #endif //FEATURE_LOH_COMPACTION } void GCHeap::SetLOHCompactionMode (int newLOHCompactionMode) { #ifdef FEATURE_LOH_COMPACTION pGenGCHeap->loh_compaction_mode = (gc_loh_compaction_mode)newLOHCompactionMode; #endif //FEATURE_LOH_COMPACTION } bool GCHeap::RegisterForFullGCNotification(uint32_t gen2Percentage, uint32_t lohPercentage) { #ifdef MULTIPLE_HEAPS for (int hn = 0; hn < gc_heap::n_heaps; hn++) { gc_heap* hp = gc_heap::g_heaps [hn]; hp->fgn_last_alloc = dd_new_allocation (hp->dynamic_data_of (0)); hp->fgn_maxgen_percent = gen2Percentage; } #else //MULTIPLE_HEAPS pGenGCHeap->fgn_last_alloc = dd_new_allocation (pGenGCHeap->dynamic_data_of (0)); pGenGCHeap->fgn_maxgen_percent = gen2Percentage; #endif //MULTIPLE_HEAPS pGenGCHeap->full_gc_approach_event.Reset(); pGenGCHeap->full_gc_end_event.Reset(); pGenGCHeap->full_gc_approach_event_set = false; pGenGCHeap->fgn_loh_percent = lohPercentage; return TRUE; } bool GCHeap::CancelFullGCNotification() { #ifdef MULTIPLE_HEAPS for (int hn = 0; hn < gc_heap::n_heaps; hn++) { gc_heap* hp = gc_heap::g_heaps [hn]; hp->fgn_maxgen_percent = 0; } #else //MULTIPLE_HEAPS pGenGCHeap->fgn_maxgen_percent = 0; #endif //MULTIPLE_HEAPS pGenGCHeap->fgn_loh_percent = 0; pGenGCHeap->full_gc_approach_event.Set(); pGenGCHeap->full_gc_end_event.Set(); return TRUE; } int GCHeap::WaitForFullGCApproach(int millisecondsTimeout) { dprintf (2, ("WFGA: Begin wait")); int result = gc_heap::full_gc_wait (&(pGenGCHeap->full_gc_approach_event), millisecondsTimeout); dprintf (2, ("WFGA: End wait")); return result; } int GCHeap::WaitForFullGCComplete(int millisecondsTimeout) { dprintf (2, ("WFGE: Begin wait")); int result = gc_heap::full_gc_wait (&(pGenGCHeap->full_gc_end_event), millisecondsTimeout); dprintf (2, ("WFGE: End wait")); return result; } int GCHeap::StartNoGCRegion(uint64_t totalSize, bool lohSizeKnown, uint64_t lohSize, bool disallowFullBlockingGC) { NoGCRegionLockHolder lh; dprintf (1, ("begin no gc called")); start_no_gc_region_status status = gc_heap::prepare_for_no_gc_region (totalSize, lohSizeKnown, lohSize, disallowFullBlockingGC); if (status == start_no_gc_success) { GarbageCollect (max_generation); status = gc_heap::get_start_no_gc_region_status(); } if (status != start_no_gc_success) gc_heap::handle_failure_for_no_gc(); return (int)status; } int GCHeap::EndNoGCRegion() { NoGCRegionLockHolder lh; return (int)gc_heap::end_no_gc_region(); } void GCHeap::PublishObject (uint8_t* Obj) { #ifdef BACKGROUND_GC gc_heap* hp = gc_heap::heap_of (Obj); hp->bgc_alloc_lock->uoh_alloc_done (Obj); hp->bgc_untrack_uoh_alloc(); #endif //BACKGROUND_GC } // The spec for this one isn't clear. This function // returns the size that can be allocated without // triggering a GC of any kind. 
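// In practice it only reports what is left of gen0's current allocation quantum
// (allocation_limit - allocation_pointer, read under the GC lock), so it is a cheap,
// conservative point-in-time figure rather than a real measurement of free space.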
size_t GCHeap::ApproxFreeBytes() { enter_spin_lock (&pGenGCHeap->gc_lock); generation* gen = pGenGCHeap->generation_of (0); size_t res = generation_allocation_limit (gen) - generation_allocation_pointer (gen); leave_spin_lock (&pGenGCHeap->gc_lock); return res; } HRESULT GCHeap::GetGcCounters(int gen, gc_counters* counters) { if ((gen < 0) || (gen > max_generation)) return E_FAIL; #ifdef MULTIPLE_HEAPS counters->current_size = 0; counters->promoted_size = 0; counters->collection_count = 0; //enumerate all the heaps and get their counters. for (int i = 0; i < gc_heap::n_heaps; i++) { dynamic_data* dd = gc_heap::g_heaps [i]->dynamic_data_of (gen); counters->current_size += dd_current_size (dd); counters->promoted_size += dd_promoted_size (dd); if (i == 0) counters->collection_count += dd_collection_count (dd); } #else dynamic_data* dd = pGenGCHeap->dynamic_data_of (gen); counters->current_size = dd_current_size (dd); counters->promoted_size = dd_promoted_size (dd); counters->collection_count = dd_collection_count (dd); #endif //MULTIPLE_HEAPS return S_OK; } // Get the segment size to use, making sure it conforms. size_t GCHeap::GetValidSegmentSize(bool large_seg) { #ifdef USE_REGIONS return (large_seg ? global_region_allocator.get_large_region_alignment() : global_region_allocator.get_region_alignment()); #else return (large_seg ? gc_heap::min_uoh_segment_size : gc_heap::soh_segment_size); #endif //USE_REGIONS } size_t gc_heap::get_gen0_min_size() { size_t gen0size = static_cast<size_t>(GCConfig::GetGen0Size()); bool is_config_invalid = ((gen0size == 0) || !g_theGCHeap->IsValidGen0MaxSize(gen0size)); if (is_config_invalid) { #ifdef SERVER_GC // performance data seems to indicate halving the size results // in optimal perf. Ask for adjusted gen0 size. gen0size = max(GCToOSInterface::GetCacheSizePerLogicalCpu(FALSE),(256*1024)); // if gen0 size is too large given the available memory, reduce it. // Get true cache size, as we don't want to reduce below this. size_t trueSize = max(GCToOSInterface::GetCacheSizePerLogicalCpu(TRUE),(256*1024)); dprintf (1, ("cache: %Id-%Id", GCToOSInterface::GetCacheSizePerLogicalCpu(FALSE), GCToOSInterface::GetCacheSizePerLogicalCpu(TRUE))); int n_heaps = gc_heap::n_heaps; #else //SERVER_GC size_t trueSize = GCToOSInterface::GetCacheSizePerLogicalCpu(TRUE); gen0size = max((4*trueSize/5),(256*1024)); trueSize = max(trueSize, (256*1024)); int n_heaps = 1; #endif //SERVER_GC dprintf (1, ("gen0size: %Id * %d = %Id, physical mem: %Id / 6 = %Id", gen0size, n_heaps, (gen0size * n_heaps), gc_heap::total_physical_mem, gc_heap::total_physical_mem / 6)); // if the total min GC across heaps will exceed 1/6th of available memory, // then reduce the min GC size until it either fits or has been reduced to cache size. while ((gen0size * n_heaps) > (gc_heap::total_physical_mem / 6)) { gen0size = gen0size / 2; if (gen0size <= trueSize) { gen0size = trueSize; break; } } } #ifdef FEATURE_EVENT_TRACE else { gen0_min_budget_from_config = gen0size; } #endif //FEATURE_EVENT_TRACE size_t seg_size = gc_heap::soh_segment_size; assert (seg_size); // Generation 0 must never be more than 1/2 the segment size. if (gen0size >= (seg_size / 2)) gen0size = seg_size / 2; // If the value from config is valid we use it as is without this adjustment. 
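// That is, the 1/8-of-segment cap (only applied under a hard heap limit) and the final * 5/8
// scaling below only affect the budget derived from the cache size; for example, a cache-derived
// 16MB budget that already fits under seg_size / 8 simply becomes 16MB / 8 * 5 = 10MB.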
if (is_config_invalid) { if (heap_hard_limit) { size_t gen0size_seg = seg_size / 8; if (gen0size >= gen0size_seg) { dprintf (1, ("gen0 limited by seg size %Id->%Id", gen0size, gen0size_seg)); gen0size = gen0size_seg; } } gen0size = gen0size / 8 * 5; } #ifdef USE_REGIONS #ifdef STRESS_REGIONS // This is just so we can test allocation using more than one region on machines with very // small caches. gen0size = ((size_t)1 << min_segment_size_shr) * 3; #endif //STRESS_REGIONS #endif //USE_REGIONS gen0size = Align (gen0size); return gen0size; } void GCHeap::SetReservedVMLimit (size_t vmlimit) { gc_heap::reserved_memory_limit = vmlimit; } //versions of same method on each heap #ifdef FEATURE_PREMORTEM_FINALIZATION Object* GCHeap::GetNextFinalizableObject() { #ifdef MULTIPLE_HEAPS //return the first non critical one in the first queue. for (int hn = 0; hn < gc_heap::n_heaps; hn++) { gc_heap* hp = gc_heap::g_heaps [hn]; Object* O = hp->finalize_queue->GetNextFinalizableObject(TRUE); if (O) return O; } //return the first non critical/critical one in the first queue. for (int hn = 0; hn < gc_heap::n_heaps; hn++) { gc_heap* hp = gc_heap::g_heaps [hn]; Object* O = hp->finalize_queue->GetNextFinalizableObject(FALSE); if (O) return O; } return 0; #else //MULTIPLE_HEAPS return pGenGCHeap->finalize_queue->GetNextFinalizableObject(); #endif //MULTIPLE_HEAPS } size_t GCHeap::GetNumberFinalizableObjects() { #ifdef MULTIPLE_HEAPS size_t cnt = 0; for (int hn = 0; hn < gc_heap::n_heaps; hn++) { gc_heap* hp = gc_heap::g_heaps [hn]; cnt += hp->finalize_queue->GetNumberFinalizableObjects(); } return cnt; #else //MULTIPLE_HEAPS return pGenGCHeap->finalize_queue->GetNumberFinalizableObjects(); #endif //MULTIPLE_HEAPS } size_t GCHeap::GetFinalizablePromotedCount() { #ifdef MULTIPLE_HEAPS size_t cnt = 0; for (int hn = 0; hn < gc_heap::n_heaps; hn++) { gc_heap* hp = gc_heap::g_heaps [hn]; cnt += hp->finalize_queue->GetPromotedCount(); } return cnt; #else //MULTIPLE_HEAPS return pGenGCHeap->finalize_queue->GetPromotedCount(); #endif //MULTIPLE_HEAPS } //--------------------------------------------------------------------------- // Finalized class tracking //--------------------------------------------------------------------------- bool GCHeap::RegisterForFinalization (int gen, Object* obj) { if (gen == -1) gen = 0; if (((((CObjectHeader*)obj)->GetHeader()->GetBits()) & BIT_SBLK_FINALIZER_RUN)) { ((CObjectHeader*)obj)->GetHeader()->ClrBit(BIT_SBLK_FINALIZER_RUN); return true; } else { gc_heap* hp = gc_heap::heap_of ((uint8_t*)obj); return hp->finalize_queue->RegisterForFinalization (gen, obj); } } void GCHeap::SetFinalizationRun (Object* obj) { ((CObjectHeader*)obj)->GetHeader()->SetBit(BIT_SBLK_FINALIZER_RUN); } //-------------------------------------------------------------------- // // Support for finalization // //-------------------------------------------------------------------- inline unsigned int gen_segment (int gen) { assert (((signed)total_generation_count - gen - 1)>=0); return (total_generation_count - gen - 1); } bool CFinalize::Initialize() { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; m_Array = new (nothrow)(Object*[100]); if (!m_Array) { ASSERT (m_Array); STRESS_LOG_OOM_STACK(sizeof(Object*[100])); if (GCConfig::GetBreakOnOOM()) { GCToOSInterface::DebugBreak(); } return false; } m_EndArray = &m_Array[100]; for (int i =0; i < FreeList; i++) { SegQueueLimit (i) = m_Array; } m_PromotedCount = 0; lock = -1; #ifdef _DEBUG lockowner_threadid.Clear(); #endif // _DEBUG return true; } 
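// Layout of the finalization queue, as set up by Initialize above and used by the methods that
// follow: m_Array is one contiguous buffer carved into adjacent segments by m_FillPointers.
// There is one segment per generation, addressed through gen_segment (which maps gen0 to the
// segment next to the finalizer lists), and then, judging by GcScanRoots and
// GetNextFinalizableObject below, the critical finalizer list, the normal finalizer list, and
// finally the free space that runs up to m_EndArray. SegQueue/SegQueueLimit give the start and
// end of a segment, and MoveItem swaps one boundary element per intervening segment, so moving
// an entry between segments costs a few pointer swaps rather than shifting a whole range.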
CFinalize::~CFinalize() { delete m_Array; } size_t CFinalize::GetPromotedCount () { return m_PromotedCount; } inline void CFinalize::EnterFinalizeLock() { _ASSERTE(dbgOnly_IsSpecialEEThread() || GCToEEInterface::GetThread() == 0 || GCToEEInterface::IsPreemptiveGCDisabled()); retry: if (Interlocked::CompareExchange(&lock, 0, -1) >= 0) { unsigned int i = 0; while (lock >= 0) { YieldProcessor(); // indicate to the processor that we are spinning if (++i & 7) GCToOSInterface::YieldThread (0); else GCToOSInterface::Sleep (5); } goto retry; } #ifdef _DEBUG lockowner_threadid.SetToCurrentThread(); #endif // _DEBUG } inline void CFinalize::LeaveFinalizeLock() { _ASSERTE(dbgOnly_IsSpecialEEThread() || GCToEEInterface::GetThread() == 0 || GCToEEInterface::IsPreemptiveGCDisabled()); #ifdef _DEBUG lockowner_threadid.Clear(); #endif // _DEBUG lock = -1; } bool CFinalize::RegisterForFinalization (int gen, Object* obj, size_t size) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; EnterFinalizeLock(); // Adjust gen unsigned int dest = gen_segment (gen); // Adjust boundary for segments so that GC will keep objects alive. Object*** s_i = &SegQueue (FreeList); if ((*s_i) == m_EndArray) { if (!GrowArray()) { LeaveFinalizeLock(); if (method_table(obj) == NULL) { // If the object is uninitialized, a valid size should have been passed. assert (size >= Align (min_obj_size)); dprintf (3, (ThreadStressLog::gcMakeUnusedArrayMsg(), (size_t)obj, (size_t)(obj+size))); ((CObjectHeader*)obj)->SetFree(size); } STRESS_LOG_OOM_STACK(0); if (GCConfig::GetBreakOnOOM()) { GCToOSInterface::DebugBreak(); } return false; } } Object*** end_si = &SegQueueLimit (dest); do { //is the segment empty? if (!(*s_i == *(s_i-1))) { //no, swap the end elements. *(*s_i) = *(*(s_i-1)); } //increment the fill pointer (*s_i)++; //go to the next segment. 
s_i--; } while (s_i > end_si); // We have reached the destination segment // store the object **s_i = obj; // increment the fill pointer (*s_i)++; LeaveFinalizeLock(); return true; } Object* CFinalize::GetNextFinalizableObject (BOOL only_non_critical) { Object* obj = 0; EnterFinalizeLock(); if (!IsSegEmpty(FinalizerListSeg)) { obj = *(--SegQueueLimit (FinalizerListSeg)); } else if (!only_non_critical && !IsSegEmpty(CriticalFinalizerListSeg)) { //the FinalizerList is empty, we can adjust both // limit instead of moving the object to the free list obj = *(--SegQueueLimit (CriticalFinalizerListSeg)); --SegQueueLimit (FinalizerListSeg); } if (obj) { dprintf (3, ("running finalizer for %Ix (mt: %Ix)", obj, method_table (obj))); } LeaveFinalizeLock(); return obj; } size_t CFinalize::GetNumberFinalizableObjects() { return SegQueueLimit(FinalizerListSeg) - SegQueue(FinalizerListSeg); } void CFinalize::MoveItem (Object** fromIndex, unsigned int fromSeg, unsigned int toSeg) { int step; ASSERT (fromSeg != toSeg); if (fromSeg > toSeg) step = -1; else step = +1; // Place the element at the boundary closest to dest Object** srcIndex = fromIndex; for (unsigned int i = fromSeg; i != toSeg; i+= step) { Object**& destFill = m_FillPointers[i+(step - 1 )/2]; Object** destIndex = destFill - (step + 1)/2; if (srcIndex != destIndex) { Object* tmp = *srcIndex; *srcIndex = *destIndex; *destIndex = tmp; } destFill -= step; srcIndex = destIndex; } } void CFinalize::GcScanRoots (promote_func* fn, int hn, ScanContext *pSC) { ScanContext sc; if (pSC == 0) pSC = &sc; pSC->thread_number = hn; //scan the finalization queue Object** startIndex = SegQueue (CriticalFinalizerListSeg); Object** stopIndex = SegQueueLimit (FinalizerListSeg); for (Object** po = startIndex; po < stopIndex; po++) { Object* o = *po; //dprintf (3, ("scan freacheable %Ix", (size_t)o)); dprintf (3, ("scan f %Ix", (size_t)o)); (*fn)(po, pSC, 0); } } void CFinalize::WalkFReachableObjects (fq_walk_fn fn) { Object** startIndex = SegQueue (CriticalFinalizerListSeg); Object** stopCriticalIndex = SegQueueLimit (CriticalFinalizerListSeg); Object** stopIndex = SegQueueLimit (FinalizerListSeg); for (Object** po = startIndex; po < stopIndex; po++) { fn(po < stopCriticalIndex, *po); } } BOOL CFinalize::ScanForFinalization (promote_func* pfn, int gen, BOOL mark_only_p, gc_heap* hp) { ScanContext sc; sc.promotion = TRUE; #ifdef MULTIPLE_HEAPS sc.thread_number = hp->heap_number; #else UNREFERENCED_PARAMETER(hp); #endif //MULTIPLE_HEAPS BOOL finalizedFound = FALSE; //start with gen and explore all the younger generations. unsigned int startSeg = gen_segment (gen); { m_PromotedCount = 0; for (unsigned int Seg = startSeg; Seg <= gen_segment(0); Seg++) { Object** endIndex = SegQueue (Seg); for (Object** i = SegQueueLimit (Seg)-1; i >= endIndex ;i--) { CObjectHeader* obj = (CObjectHeader*)*i; dprintf (3, ("scanning: %Ix", (size_t)obj)); if (!g_theGCHeap->IsPromoted (obj)) { dprintf (3, ("freacheable: %Ix", (size_t)obj)); assert (method_table(obj)->HasFinalizer()); if (GCToEEInterface::EagerFinalized(obj)) { MoveItem (i, Seg, FreeList); } else if ((obj->GetHeader()->GetBits()) & BIT_SBLK_FINALIZER_RUN) { //remove the object because we don't want to //run the finalizer MoveItem (i, Seg, FreeList); //Reset the bit so it will be put back on the queue //if resurrected and re-registered. 
obj->GetHeader()->ClrBit (BIT_SBLK_FINALIZER_RUN); } else { m_PromotedCount++; if (method_table(obj)->HasCriticalFinalizer()) { MoveItem (i, Seg, CriticalFinalizerListSeg); } else { MoveItem (i, Seg, FinalizerListSeg); } } } #ifdef BACKGROUND_GC else { if ((gen == max_generation) && (gc_heap::background_running_p())) { // TODO - fix the following line. //assert (gc_heap::background_object_marked ((uint8_t*)obj, FALSE)); dprintf (3, ("%Ix is marked", (size_t)obj)); } } #endif //BACKGROUND_GC } } } finalizedFound = !IsSegEmpty(FinalizerListSeg) || !IsSegEmpty(CriticalFinalizerListSeg); if (finalizedFound) { //Promote the f-reachable objects GcScanRoots (pfn, #ifdef MULTIPLE_HEAPS hp->heap_number #else 0 #endif //MULTIPLE_HEAPS , 0); hp->settings.found_finalizers = TRUE; #ifdef BACKGROUND_GC if (hp->settings.concurrent) { hp->settings.found_finalizers = !(IsSegEmpty(FinalizerListSeg) && IsSegEmpty(CriticalFinalizerListSeg)); } #endif //BACKGROUND_GC if (hp->settings.concurrent && hp->settings.found_finalizers) { if (!mark_only_p) GCToEEInterface::EnableFinalization(true); } } return finalizedFound; } //Relocates all of the objects in the finalization array void CFinalize::RelocateFinalizationData (int gen, gc_heap* hp) { ScanContext sc; sc.promotion = FALSE; #ifdef MULTIPLE_HEAPS sc.thread_number = hp->heap_number; #else UNREFERENCED_PARAMETER(hp); #endif //MULTIPLE_HEAPS unsigned int Seg = gen_segment (gen); Object** startIndex = SegQueue (Seg); dprintf (3, ("RelocateFinalizationData gen=%d, [%Ix,%Ix[", gen, startIndex, SegQueue (FreeList))); for (Object** po = startIndex; po < SegQueue (FreeList);po++) { GCHeap::Relocate (po, &sc); } } void CFinalize::UpdatePromotedGenerations (int gen, BOOL gen_0_empty_p) { dprintf(3, ("UpdatePromotedGenerations gen=%d, gen_0_empty_p=%d", gen, gen_0_empty_p)); // update the generation fill pointers. // if gen_0_empty is FALSE, test each object to find out if // it was promoted or not if (gen_0_empty_p) { for (int i = min (gen+1, max_generation); i > 0; i--) { m_FillPointers [gen_segment(i)] = m_FillPointers [gen_segment(i-1)]; } } else { //Look for demoted or promoted objects for (int i = gen; i >= 0; i--) { unsigned int Seg = gen_segment (i); Object** startIndex = SegQueue (Seg); for (Object** po = startIndex; po < SegQueueLimit (gen_segment(i)); po++) { int new_gen = g_theGCHeap->WhichGeneration (*po); if (new_gen != i) { dprintf (3, ("Moving object %Ix->%Ix from gen %d to gen %d", po, *po, i, new_gen)); if (new_gen > i) { //promotion MoveItem (po, gen_segment (i), gen_segment (new_gen)); } else { //demotion MoveItem (po, gen_segment (i), gen_segment (new_gen)); //back down in order to see all objects. 
po--; } } } } } } BOOL CFinalize::GrowArray() { size_t oldArraySize = (m_EndArray - m_Array); size_t newArraySize = (size_t)(((float)oldArraySize / 10) * 12); Object** newArray = new (nothrow) Object*[newArraySize]; if (!newArray) { return FALSE; } memcpy (newArray, m_Array, oldArraySize*sizeof(Object*)); dprintf (3, ("Grow finalizer array [%Ix,%Ix[ -> [%Ix,%Ix[", m_Array, m_EndArray, newArray, &m_Array[newArraySize])); //adjust the fill pointers for (int i = 0; i < FreeList; i++) { m_FillPointers [i] += (newArray - m_Array); } delete[] m_Array; m_Array = newArray; m_EndArray = &m_Array [newArraySize]; return TRUE; } #ifdef VERIFY_HEAP void CFinalize::CheckFinalizerObjects() { for (int i = 0; i <= max_generation; i++) { Object **startIndex = SegQueue (gen_segment (i)); Object **stopIndex = SegQueueLimit (gen_segment (i)); for (Object **po = startIndex; po < stopIndex; po++) { if ((int)g_theGCHeap->WhichGeneration (*po) < i) FATAL_GC_ERROR (); ((CObjectHeader*)*po)->Validate(); } } } #endif //VERIFY_HEAP #endif // FEATURE_PREMORTEM_FINALIZATION //------------------------------------------------------------------------------ // // End of VM specific support // //------------------------------------------------------------------------------ void gc_heap::walk_heap_per_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p) { generation* gen = gc_heap::generation_of (gen_number); heap_segment* seg = generation_start_segment (gen); uint8_t* x = ((gen_number == max_generation) ? heap_segment_mem (seg) : get_soh_start_object (seg, gen)); uint8_t* end = heap_segment_allocated (seg); int align_const = get_alignment_constant (TRUE); BOOL walk_pinned_object_heap = walk_large_object_heap_p; while (1) { if (x >= end) { if ((seg = heap_segment_next (seg)) != 0) { x = heap_segment_mem (seg); end = heap_segment_allocated (seg); continue; } #ifdef USE_REGIONS else if (gen_number > 0) { // advance to next lower generation gen_number--; gen = gc_heap::generation_of (gen_number); seg = generation_start_segment (gen); x = heap_segment_mem (seg); end = heap_segment_allocated (seg); continue; } #endif // USE_REGIONS else { if (walk_large_object_heap_p) { walk_large_object_heap_p = FALSE; seg = generation_start_segment (large_object_generation); } else if (walk_pinned_object_heap) { walk_pinned_object_heap = FALSE; seg = generation_start_segment (pinned_object_generation); } else { break; } align_const = get_alignment_constant (FALSE); x = heap_segment_mem (seg); end = heap_segment_allocated (seg); continue; } } size_t s = size (x); CObjectHeader* o = (CObjectHeader*)x; if (!o->IsFree()) { _ASSERTE(((size_t)o & 0x3) == 0); // Last two bits should never be set at this point if (!fn (o->GetObjectBase(), context)) return; } x = x + Align (s, align_const); } } void gc_heap::walk_finalize_queue (fq_walk_fn fn) { #ifdef FEATURE_PREMORTEM_FINALIZATION finalize_queue->WalkFReachableObjects (fn); #endif //FEATURE_PREMORTEM_FINALIZATION } void gc_heap::walk_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p) { #ifdef MULTIPLE_HEAPS for (int hn = 0; hn < gc_heap::n_heaps; hn++) { gc_heap* hp = gc_heap::g_heaps [hn]; hp->walk_heap_per_heap (fn, context, gen_number, walk_large_object_heap_p); } #else walk_heap_per_heap(fn, context, gen_number, walk_large_object_heap_p); #endif //MULTIPLE_HEAPS } void GCHeap::DiagWalkObject (Object* obj, walk_fn fn, void* context) { uint8_t* o = (uint8_t*)obj; if (o) { go_through_object_cl (method_table (o), o, size(o), oo, { if (*oo) { 
Object *oh = (Object*)*oo; if (!fn (oh, context)) return; } } ); } } void GCHeap::DiagWalkObject2 (Object* obj, walk_fn2 fn, void* context) { uint8_t* o = (uint8_t*)obj; if (o) { go_through_object_cl (method_table (o), o, size(o), oo, { if (*oo) { if (!fn (obj, oo, context)) return; } } ); } } void GCHeap::DiagWalkSurvivorsWithType (void* gc_context, record_surv_fn fn, void* diag_context, walk_surv_type type, int gen_number) { gc_heap* hp = (gc_heap*)gc_context; if (type == walk_for_uoh) { hp->walk_survivors_for_uoh (diag_context, fn, gen_number); } else { hp->walk_survivors (fn, diag_context, type); } } void GCHeap::DiagWalkHeap (walk_fn fn, void* context, int gen_number, bool walk_large_object_heap_p) { gc_heap::walk_heap (fn, context, gen_number, walk_large_object_heap_p); } void GCHeap::DiagWalkFinalizeQueue (void* gc_context, fq_walk_fn fn) { gc_heap* hp = (gc_heap*)gc_context; hp->walk_finalize_queue (fn); } void GCHeap::DiagScanFinalizeQueue (fq_scan_fn fn, ScanContext* sc) { #ifdef MULTIPLE_HEAPS for (int hn = 0; hn < gc_heap::n_heaps; hn++) { gc_heap* hp = gc_heap::g_heaps [hn]; hp->finalize_queue->GcScanRoots(fn, hn, sc); } #else pGenGCHeap->finalize_queue->GcScanRoots(fn, 0, sc); #endif //MULTIPLE_HEAPS } void GCHeap::DiagScanHandles (handle_scan_fn fn, int gen_number, ScanContext* context) { GCScan::GcScanHandlesForProfilerAndETW (gen_number, context, fn); } void GCHeap::DiagScanDependentHandles (handle_scan_fn fn, int gen_number, ScanContext* context) { GCScan::GcScanDependentHandlesForProfilerAndETW (gen_number, context, fn); } void GCHeap::DiagGetGCSettings(EtwGCSettingsInfo* etw_settings) { #ifdef FEATURE_EVENT_TRACE etw_settings->heap_hard_limit = gc_heap::heap_hard_limit; etw_settings->loh_threshold = loh_size_threshold; etw_settings->physical_memory_from_config = gc_heap::physical_memory_from_config; etw_settings->gen0_min_budget_from_config = gc_heap::gen0_min_budget_from_config; etw_settings->gen0_max_budget_from_config = gc_heap::gen0_max_budget_from_config; etw_settings->high_mem_percent_from_config = gc_heap::high_mem_percent_from_config; #ifdef BACKGROUND_GC etw_settings->concurrent_gc_p = gc_heap::gc_can_use_concurrent; #else etw_settings->concurrent_gc_p = false; #endif //BACKGROUND_GC etw_settings->use_large_pages_p = gc_heap::use_large_pages_p; etw_settings->use_frozen_segments_p = gc_heap::use_frozen_segments_p; etw_settings->hard_limit_config_p = gc_heap::hard_limit_config_p; etw_settings->no_affinitize_p = #ifdef MULTIPLE_HEAPS gc_heap::gc_thread_no_affinitize_p; #else true; #endif //MULTIPLE_HEAPS #endif //FEATURE_EVENT_TRACE } #if defined(WRITE_BARRIER_CHECK) && !defined (SERVER_GC) // This code is designed to catch the failure to update the write barrier // The way it works is to copy the whole heap right after every GC. The write // barrier code has been modified so that it updates the shadow as well as the // real GC heap. Before doing the next GC, we walk the heap, looking for pointers // that were updated in the real heap, but not the shadow. A mismatch indicates // an error. The offending code can be found by breaking after the correct GC, // and then placing a data breakpoint on the Heap location that was updated without // going through the write barrier. 
// Called at process shutdown void deleteGCShadow() { if (g_GCShadow != 0) GCToOSInterface::VirtualRelease (g_GCShadow, g_GCShadowEnd - g_GCShadow); g_GCShadow = 0; g_GCShadowEnd = 0; } // Called at startup and right after a GC, get a snapshot of the GC Heap void initGCShadow() { if (!(GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_BARRIERCHECK)) return; size_t len = g_gc_highest_address - g_gc_lowest_address; if (len > (size_t)(g_GCShadowEnd - g_GCShadow)) { deleteGCShadow(); g_GCShadowEnd = g_GCShadow = (uint8_t *)GCToOSInterface::VirtualReserve(len, 0, VirtualReserveFlags::None); if (g_GCShadow == NULL || !GCToOSInterface::VirtualCommit(g_GCShadow, len)) { _ASSERTE(!"Not enough memory to run HeapVerify level 2"); // If after the assert we decide to allow the program to continue // running we need to be in a state that will not trigger any // additional AVs while we fail to allocate a shadow segment, i.e. // ensure calls to updateGCShadow() checkGCWriteBarrier() don't AV deleteGCShadow(); return; } g_GCShadowEnd += len; } // save the value of g_gc_lowest_address at this time. If this value changes before // the next call to checkGCWriteBarrier() it means we extended the heap (with a // large object segment most probably), and the whole shadow segment is inconsistent. g_shadow_lowest_address = g_gc_lowest_address; //****** Copy the whole GC heap ****** // // NOTE: This is the one situation where the combination of heap_segment_rw(gen_start_segment()) // can produce a NULL result. This is because the initialization has not completed. // for (int i = get_start_generation_index(); i < total_generation_count; i++) { generation* gen = gc_heap::generation_of (i); heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); ptrdiff_t delta = g_GCShadow - g_gc_lowest_address; while (seg) { // Copy the segment uint8_t* start = heap_segment_mem(seg); uint8_t* end = heap_segment_allocated (seg); memcpy(start + delta, start, end - start); seg = heap_segment_next_rw (seg); } } } #define INVALIDGCVALUE (void*)((size_t)0xcccccccd) // test to see if 'ptr' was only updated via the write barrier. inline void testGCShadow(Object** ptr) { Object** shadow = (Object**) &g_GCShadow[((uint8_t*) ptr - g_gc_lowest_address)]; if (*ptr != 0 && (uint8_t*) shadow < g_GCShadowEnd && *ptr != *shadow) { // If you get this assertion, someone updated a GC pointer in the heap without // using the write barrier. To find out who, check the value of // dd_collection_count (dynamic_data_of (0)). Also // note the value of 'ptr'. Rerun the App that the previous GC just occurred. // Then put a data breakpoint for the value of 'ptr' Then check every write // to pointer between the two GCs. The last one is not using the write barrier. // If the memory of interest does not exist at system startup, // you need to set the data breakpoint right after the memory gets committed // Set a breakpoint at the end of grow_heap_segment, and put the value of 'ptr' // in the memory window. run until the memory gets mapped. Then you can set // your breakpoint // Note a recent change, we've identified race conditions when updating the gc shadow. // Throughout the runtime, code will update an address in the gc heap, then erect the // write barrier, which calls updateGCShadow. With an app that pounds one heap location // from multiple threads, you can hit this assert even though all involved are using the // write barrier properly. Thusly, we detect the race and set this location to INVALIDGCVALUE. 
// TODO: the code in jithelp.asm doesn't call updateGCShadow, and hasn't been // TODO: fixed to detect the race. We've only seen this race from VolatileWritePtr, // TODO: so elect not to fix jithelp.asm at this time. It should be done if we start hitting // TODO: erroneous asserts in here. if(*shadow!=INVALIDGCVALUE) { #ifdef FEATURE_BASICFREEZE // Write barriers for stores of references to frozen objects may be optimized away. if (!gc_heap::frozen_object_p(*ptr)) #endif // FEATURE_BASICFREEZE { _ASSERTE(!"Pointer updated without using write barrier"); } } /* else { printf("saw a INVALIDGCVALUE. (just to let you know)\n"); } */ } } void testGCShadowHelper (uint8_t* x) { size_t s = size (x); if (contain_pointers (x)) { go_through_object_nostart (method_table(x), x, s, oo, { testGCShadow((Object**) oo); }); } } // Walk the whole heap, looking for pointers that were not updated with the write barrier. void checkGCWriteBarrier() { // g_shadow_lowest_address != g_gc_lowest_address means the GC heap was extended by a segment // and the GC shadow segment did not track that change! if (g_GCShadowEnd <= g_GCShadow || g_shadow_lowest_address != g_gc_lowest_address) { // No shadow stack, nothing to check. return; } { generation* gen = gc_heap::generation_of (max_generation); heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); PREFIX_ASSUME(seg != NULL); while(seg) { uint8_t* x = heap_segment_mem(seg); while (x < heap_segment_allocated (seg)) { size_t s = size (x); testGCShadowHelper (x); x = x + Align (s); } seg = heap_segment_next_rw (seg); } } { // go through non-soh object heaps int alignment = get_alignment_constant(FALSE); for (int i = uoh_start_generation; i < total_generation_count; i++) { generation* gen = gc_heap::generation_of (i); heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); PREFIX_ASSUME(seg != NULL); while(seg) { uint8_t* x = heap_segment_mem(seg); while (x < heap_segment_allocated (seg)) { size_t s = size (x); testGCShadowHelper (x); x = x + Align (s, alignment); } seg = heap_segment_next_rw (seg); } } } } #endif //WRITE_BARRIER_CHECK && !SERVER_GC #ifdef FEATURE_BASICFREEZE void gc_heap::walk_read_only_segment(heap_segment *seg, void *pvContext, object_callback_func pfnMethodTable, object_callback_func pfnObjRef) { uint8_t *o = heap_segment_mem(seg); int alignment = get_alignment_constant(TRUE); while (o < heap_segment_allocated(seg)) { pfnMethodTable(pvContext, o); if (contain_pointers (o)) { go_through_object_nostart (method_table (o), o, size(o), oo, { if (*oo) pfnObjRef(pvContext, oo); } ); } o += Align(size(o), alignment); } } #endif // FEATURE_BASICFREEZE HRESULT GCHeap::WaitUntilConcurrentGCCompleteAsync(int millisecondsTimeout) { #ifdef BACKGROUND_GC if (gc_heap::background_running_p()) { uint32_t dwRet = pGenGCHeap->background_gc_wait(awr_ignored, millisecondsTimeout); if (dwRet == WAIT_OBJECT_0) return S_OK; else if (dwRet == WAIT_TIMEOUT) return HRESULT_FROM_WIN32(ERROR_TIMEOUT); else return E_FAIL; // It is not clear if what the last error would be if the wait failed, // as there are too many layers in between. 
The best we can do is to return E_FAIL; } #endif return S_OK; } void GCHeap::TemporaryEnableConcurrentGC() { #ifdef BACKGROUND_GC gc_heap::temp_disable_concurrent_p = false; #endif //BACKGROUND_GC } void GCHeap::TemporaryDisableConcurrentGC() { #ifdef BACKGROUND_GC gc_heap::temp_disable_concurrent_p = true; #endif //BACKGROUND_GC } bool GCHeap::IsConcurrentGCEnabled() { #ifdef BACKGROUND_GC return (gc_heap::gc_can_use_concurrent && !(gc_heap::temp_disable_concurrent_p)); #else return FALSE; #endif //BACKGROUND_GC } void PopulateDacVars(GcDacVars *gcDacVars) { #define DEFINE_FIELD(field_name, field_type) offsetof(CLASS_NAME, field_name), #define DEFINE_DPTR_FIELD(field_name, field_type) offsetof(CLASS_NAME, field_name), #define DEFINE_ARRAY_FIELD(field_name, field_type, array_length) offsetof(CLASS_NAME, field_name), #define DEFINE_MISSING_FIELD(field_name) -1, #ifdef MULTIPLE_HEAPS static int gc_heap_field_offsets[] = { #define CLASS_NAME gc_heap #include "dac_gcheap_fields.h" #undef CLASS_NAME offsetof(gc_heap, generation_table) }; static_assert(sizeof(gc_heap_field_offsets) == (GENERATION_TABLE_FIELD_INDEX + 1) * sizeof(int), "GENERATION_TABLE_INDEX mismatch"); #endif //MULTIPLE_HEAPS static int generation_field_offsets[] = { #define CLASS_NAME generation #include "dac_generation_fields.h" #undef CLASS_NAME #undef DEFINE_MISSING_FIELD #undef DEFINE_ARRAY_FIELD #undef DEFINE_DPTR_FIELD #undef DEFINE_FIELD }; assert(gcDacVars != nullptr); *gcDacVars = {}; // Note: these version numbers are not actually checked by SOS, so if you change // the GC in a way that makes it incompatible with SOS, please change // SOS_BREAKING_CHANGE_VERSION in both the runtime and the diagnostics repo gcDacVars->major_version_number = 1; gcDacVars->minor_version_number = 0; #ifdef USE_REGIONS gcDacVars->minor_version_number |= 1; #endif //USE_REGIONS gcDacVars->built_with_svr = &g_built_with_svr_gc; gcDacVars->build_variant = &g_build_variant; gcDacVars->gc_structures_invalid_cnt = const_cast<int32_t*>(&GCScan::m_GcStructuresInvalidCnt); gcDacVars->generation_size = sizeof(generation); gcDacVars->total_generation_count = total_generation_count; gcDacVars->max_gen = &g_max_generation; #ifdef BACKGROUND_GC gcDacVars->current_c_gc_state = const_cast<c_gc_state*>(&gc_heap::current_c_gc_state); #else //BACKGROUND_GC gcDacVars->current_c_gc_state = 0; #endif //BACKGROUND_GC #ifndef MULTIPLE_HEAPS gcDacVars->ephemeral_heap_segment = reinterpret_cast<dac_heap_segment**>(&gc_heap::ephemeral_heap_segment); #ifdef BACKGROUND_GC gcDacVars->mark_array = &gc_heap::mark_array; gcDacVars->background_saved_lowest_address = &gc_heap::background_saved_lowest_address; gcDacVars->background_saved_highest_address = &gc_heap::background_saved_highest_address; gcDacVars->next_sweep_obj = &gc_heap::next_sweep_obj; #ifdef USE_REGIONS gcDacVars->saved_sweep_ephemeral_seg = 0; gcDacVars->saved_sweep_ephemeral_start = 0; #else gcDacVars->saved_sweep_ephemeral_seg = reinterpret_cast<dac_heap_segment**>(&gc_heap::saved_sweep_ephemeral_seg); gcDacVars->saved_sweep_ephemeral_start = &gc_heap::saved_sweep_ephemeral_start; #endif //USE_REGIONS #else //BACKGROUND_GC gcDacVars->mark_array = 0; gcDacVars->background_saved_lowest_address = 0; gcDacVars->background_saved_highest_address = 0; gcDacVars->next_sweep_obj = 0; gcDacVars->saved_sweep_ephemeral_seg = 0; gcDacVars->saved_sweep_ephemeral_start = 0; #endif //BACKGROUND_GC gcDacVars->alloc_allocated = &gc_heap::alloc_allocated; gcDacVars->oom_info = &gc_heap::oom_info; gcDacVars->finalize_queue = 
reinterpret_cast<dac_finalize_queue**>(&gc_heap::finalize_queue); gcDacVars->generation_table = reinterpret_cast<unused_generation**>(&gc_heap::generation_table); #ifdef GC_CONFIG_DRIVEN gcDacVars->gc_global_mechanisms = reinterpret_cast<size_t**>(&gc_global_mechanisms); gcDacVars->interesting_data_per_heap = reinterpret_cast<size_t**>(&gc_heap::interesting_data_per_heap); gcDacVars->compact_reasons_per_heap = reinterpret_cast<size_t**>(&gc_heap::compact_reasons_per_heap); gcDacVars->expand_mechanisms_per_heap = reinterpret_cast<size_t**>(&gc_heap::expand_mechanisms_per_heap); gcDacVars->interesting_mechanism_bits_per_heap = reinterpret_cast<size_t**>(&gc_heap::interesting_mechanism_bits_per_heap); #endif // GC_CONFIG_DRIVEN #ifdef HEAP_ANALYZE gcDacVars->internal_root_array = &gc_heap::internal_root_array; gcDacVars->internal_root_array_index = &gc_heap::internal_root_array_index; gcDacVars->heap_analyze_success = &gc_heap::heap_analyze_success; #endif // HEAP_ANALYZE #else gcDacVars->n_heaps = &gc_heap::n_heaps; gcDacVars->g_heaps = reinterpret_cast<unused_gc_heap***>(&gc_heap::g_heaps); gcDacVars->gc_heap_field_offsets = reinterpret_cast<int**>(&gc_heap_field_offsets); #endif // MULTIPLE_HEAPS gcDacVars->generation_field_offsets = reinterpret_cast<int**>(&generation_field_offsets); }
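// --- Editor's note: illustrative sketch only, not part of gc.cpp or this PR ---
// The WRITE_BARRIER_CHECK comments above describe the shadow-heap technique:
// copy the whole heap right after a GC, mirror every barriered reference store
// into that copy, and later flag any slot that changed in the real heap but not
// in the shadow. The toy program below demonstrates that idea under simplifying
// assumptions (a flat array of pointer-sized slots); ShadowHeap,
// store_with_barrier and store_without_barrier are hypothetical names, not the
// runtime's actual implementation.
#include <cstddef>
#include <cstdio>
#include <vector>

struct ShadowHeap
{
    std::vector<void*> real;    // stand-in for the GC heap slots
    std::vector<void*> shadow;  // snapshot taken "right after a GC"

    explicit ShadowHeap(size_t slots) : real(slots, nullptr), shadow(slots, nullptr) {}

    // Analogue of initGCShadow(): copy the whole heap into the shadow.
    void snapshot() { shadow = real; }

    // A store that goes through the "write barrier": both copies are updated.
    void store_with_barrier(size_t index, void* value)
    {
        real[index] = value;
        shadow[index] = value;
    }

    // A buggy store that skips the barrier: only the real heap changes.
    void store_without_barrier(size_t index, void* value)
    {
        real[index] = value;
    }

    // Analogue of checkGCWriteBarrier(): report slots that diverged.
    size_t check() const
    {
        size_t mismatches = 0;
        for (size_t i = 0; i < real.size(); i++)
        {
            if (real[i] != shadow[i])
            {
                std::printf("slot %zu was updated without the write barrier\n", i);
                mismatches++;
            }
        }
        return mismatches;
    }
};

int main()
{
    int a = 1, b = 2;
    ShadowHeap heap(4);
    heap.snapshot();
    heap.store_with_barrier(0, &a);     // fine: real and shadow agree
    heap.store_without_barrier(1, &b);  // bug: only the real heap sees it
    return heap.check() == 1 ? 0 : 1;   // expect exactly one reported mismatch
}
// --- End of editor's sketch ---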
1
dotnet/runtime
66,474
Update repo CMake configuration to target /W4 by default
Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
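The description above moves the repo-level CMake configuration to /W4 by default, with currently failing warnings disabled and re-enabled over time. As a hedged illustration (not code from this PR), the snippet below shows the kind of pattern that compiles cleanly at /W3 but trips MSVC's level-4 warning C4100 (unreferenced formal parameter), plus a common fix; the function names are hypothetical.
```cpp
// Hypothetical example of a level-4 MSVC diagnostic (C4100) and a typical fix.
static int GetVersion(int reserved)             // /W4: warning C4100: 'reserved': unreferenced formal parameter
{
    return 1;
}

static int GetVersionFixed(int /* reserved */)  // omitting the parameter name silences C4100
{
    return 1;
}

int main()
{
    return GetVersion(0) + GetVersionFixed(0) == 2 ? 0 : 1;
}
```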
AaronRobinsonMSFT
2022-03-10T20:20:31Z
2022-03-11T04:59:21Z
ba5207b0f4054fe8963fc3e87c2bd32907516961
5ce2b9f860f7a84c3059650bb67817d59d8f4953
Update repo CMake configuration to target /W4 by default. Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
./src/coreclr/jit/CMakeLists.txt
set(CMAKE_INCLUDE_CURRENT_DIR ON) include_directories("./jitstd") include_directories("../inc") # gcc 10 and lower versions are too sensitive to bit fields width and warn from core compiler. # Since there is no better / specific suppression available for these core warnings, we disable # warn-as-error (-Werror) for JIT in this case. This issue has been fixed in gcc 11. # See https://github.com/dotnet/runtime/issues/33541 if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 11.0) add_compile_options(-Wno-error) endif() function(create_standalone_jit) set(oneValueArgs TARGET OS ARCH) set(multiValueArgs DESTINATIONS) cmake_parse_arguments(TARGETDETAILS "" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) if(TARGETDETAILS_OS STREQUAL "unix_osx") if (NOT (TARGETDETAILS_ARCH STREQUAL "arm64")) message(FATAL_ERROR "Only arm64 Apple has a special ABI, use just unix for x64 Mac OS." ) endif() set(JIT_ARCH_LINK_LIBRARIES gcinfo_unix_arm64) else() set(JIT_ARCH_LINK_LIBRARIES gcinfo_${TARGETDETAILS_OS}_${TARGETDETAILS_ARCH}) endif() if(TARGETDETAILS_ARCH STREQUAL "x64") set(JIT_ARCH_SOURCES ${JIT_AMD64_SOURCES}) set(JIT_ARCH_HEADERS ${JIT_AMD64_HEADERS}) elseif((TARGETDETAILS_ARCH STREQUAL "arm") OR (TARGETDETAILS_ARCH STREQUAL "armel")) set(JIT_ARCH_SOURCES ${JIT_ARM_SOURCES}) set(JIT_ARCH_HEADERS ${JIT_ARM_HEADERS}) elseif((TARGETDETAILS_ARCH STREQUAL "armv6") OR (TARGETDETAILS_ARCH STREQUAL "armv6l")) set(JIT_ARCH_SOURCES ${JIT_ARMV6_SOURCES}) set(JIT_ARCH_HEADERS ${JIT_ARMV6_HEADERS}) elseif(TARGETDETAILS_ARCH STREQUAL "x86") set(JIT_ARCH_SOURCES ${JIT_I386_SOURCES}) set(JIT_ARCH_HEADERS ${JIT_I386_HEADERS}) elseif(TARGETDETAILS_ARCH STREQUAL "arm64") set(JIT_ARCH_SOURCES ${JIT_ARM64_SOURCES}) set(JIT_ARCH_HEADERS ${JIT_ARM64_HEADERS}) elseif(TARGETDETAILS_ARCH STREQUAL "s390x") set(JIT_ARCH_SOURCES ${JIT_S390X_SOURCES}) set(JIT_ARCH_HEADERS ${JIT_S390X_HEADERS}) else() clr_unknown_arch() endif() if (TARGETDETAILS_DESTINATIONS STREQUAL "") add_jit(${TARGETDETAILS_TARGET}) else() add_jit(${TARGETDETAILS_TARGET} DESTINATIONS "${TARGETDETAILS_DESTINATIONS}") endif() set_target_definitions_to_custom_os_and_arch(${ARGN}) set_target_properties(${TARGETDETAILS_TARGET} PROPERTIES IGNORE_FEATURE_MERGE_JIT_AND_ENGINE TRUE) target_compile_definitions(${TARGETDETAILS_TARGET} PRIVATE FEATURE_NO_HOST) target_compile_definitions(${TARGETDETAILS_TARGET} PRIVATE SELF_NO_HOST) if ((TARGETDETAILS_ARCH STREQUAL "x64") OR (TARGETDETAILS_ARCH STREQUAL "arm64") OR ((TARGETDETAILS_ARCH STREQUAL "x86") AND NOT (TARGETDETAILS_OS STREQUAL "unix"))) target_compile_definitions(${TARGETDETAILS_TARGET} PRIVATE FEATURE_SIMD) target_compile_definitions(${TARGETDETAILS_TARGET} PRIVATE FEATURE_HW_INTRINSICS) endif () endfunction() if (CLR_CMAKE_TARGET_ARCH_AMD64 OR CLR_CMAKE_TARGET_ARCH_ARM64 OR (CLR_CMAKE_TARGET_ARCH_I386 AND NOT CLR_CMAKE_HOST_UNIX)) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_ARCH>>>:FEATURE_SIMD>) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_ARCH>>>:FEATURE_HW_INTRINSICS>) endif () # JIT_BUILD disables certain PAL_TRY debugging features add_definitions(-DJIT_BUILD) if(CLR_CMAKE_TARGET_WIN32) set(JIT_RESOURCES Native.rc) endif(CLR_CMAKE_TARGET_WIN32) set( JIT_SOURCES alloc.cpp assertionprop.cpp bitset.cpp block.cpp layout.cpp codegencommon.cpp codegenlinear.cpp compiler.cpp copyprop.cpp debuginfo.cpp disasm.cpp earlyprop.cpp ee_il_dll.cpp eeinterface.cpp emit.cpp error.cpp fgbasic.cpp fgdiagnostic.cpp fgehopt.cpp 
fgflow.cpp fginline.cpp fgopt.cpp fgprofile.cpp fgstmt.cpp flowgraph.cpp forwardsub.cpp gcdecode.cpp gcencode.cpp gcinfo.cpp gentree.cpp gschecks.cpp hashbv.cpp hwintrinsic.cpp hostallocator.cpp indirectcalltransformer.cpp importer.cpp importer_vectorization.cpp inline.cpp inlinepolicy.cpp instr.cpp jitconfig.cpp jiteh.cpp jithashtable.cpp jittelemetry.cpp lclmorph.cpp lclvars.cpp likelyclass.cpp lir.cpp liveness.cpp loopcloning.cpp lower.cpp lsra.cpp lsrabuild.cpp morph.cpp morphblock.cpp objectalloc.cpp optcse.cpp optimizer.cpp patchpoint.cpp phase.cpp rangecheck.cpp rationalize.cpp redundantbranchopts.cpp regalloc.cpp register_arg_convention.cpp regset.cpp scopeinfo.cpp sideeffects.cpp sm.cpp smdata.cpp smweights.cpp ssabuilder.cpp ssarenamestate.cpp stacklevelsetter.cpp treelifeupdater.cpp typeinfo.cpp unwind.cpp utils.cpp valuenum.cpp ) if (CLR_CMAKE_TARGET_WIN32) # Append clrjit.natvis file list (APPEND JIT_SOURCES clrjit.natvis) endif(CLR_CMAKE_TARGET_WIN32) # Define all the architecture-specific source files set( JIT_AMD64_SOURCES codegenxarch.cpp emitxarch.cpp lowerxarch.cpp lsraxarch.cpp simd.cpp simdashwintrinsic.cpp simdcodegenxarch.cpp targetamd64.cpp unwindamd64.cpp hwintrinsicxarch.cpp hwintrinsiccodegenxarch.cpp ) set( JIT_ARM_SOURCES codegenarmarch.cpp codegenarm.cpp decomposelongs.cpp emitarm.cpp lowerarmarch.cpp lsraarmarch.cpp lsraarm.cpp targetarm.cpp unwindarm.cpp ) set( JIT_I386_SOURCES codegenxarch.cpp decomposelongs.cpp emitxarch.cpp lowerxarch.cpp lsraxarch.cpp simd.cpp simdashwintrinsic.cpp simdcodegenxarch.cpp targetx86.cpp unwindx86.cpp hwintrinsicxarch.cpp hwintrinsiccodegenxarch.cpp ) set( JIT_ARM64_SOURCES codegenarmarch.cpp codegenarm64.cpp emitarm64.cpp lowerarmarch.cpp lsraarmarch.cpp lsraarm64.cpp simd.cpp simdashwintrinsic.cpp targetarm64.cpp unwindarm.cpp unwindarm64.cpp hwintrinsicarm64.cpp hwintrinsiccodegenarm64.cpp ) set( JIT_ARMV6_SOURCES # Not supported as JIT target ) set( JIT_S390X_SOURCES # Not supported as JIT target ) # We include the headers here for better experience in IDEs. 
set( JIT_HEADERS ../inc/corinfo.h ../inc/corjit.h ../inc/corjitflags.h ../inc/corjithost.h _typeinfo.h alloc.h arraystack.h bitset.h layout.h bitsetasshortlong.h bitsetasuint64.h bitsetasuint64inclass.h bitsetops.h bitvec.h block.h blockset.h codegen.h codegeninterface.h compiler.h compiler.hpp compilerbitsettraits.h compilerbitsettraits.hpp compmemkind.h compphases.h dataflow.h debuginfo.h decomposelongs.h disasm.h emit.h emitdef.h emitfmts.h emitinl.h emitjmps.h emitpub.h error.h gentree.h gtlist.h gtstructs.h hashbv.h host.h hostallocator.h hwintrinsic.h ICorJitInfo_API_names.h ICorJitInfo_API_wrapper.hpp inline.h inlinepolicy.h instr.h instrs.h jit.h jitconfig.h jitconfigvalues.h jitee.h jiteh.h jitexpandarray.h jitgcinfo.h jithashtable.h jitpch.h jitstd.h jittelemetry.h lir.h loopcloning.h loopcloningopts.h lower.h lsra_reftypes.h lsra_stats.h lsra_score.h lsra.h namedintrinsiclist.h objectalloc.h opcode.h phase.h rangecheck.h rationalize.h regalloc.h register_arg_convention.h register.h regset.h sideeffects.h simd.h simdashwintrinsic.h simdintrinsiclist.h sm.h smallhash.h smcommon.h smopenum.h ssabuilder.h ssaconfig.h ssarenamestate.h stacklevelsetter.h target.h targetx86.h targetamd64.h targetarm.h targetarm64.h tinyarray.h titypes.h treelifeupdater.h typelist.h unwind.h utils.h valuenum.h valuenumfuncs.h valuenumtype.h varset.h vartype.h vartypesdef.h ) # Arch specific headers set( JIT_AMD64_HEADERS emitfmtsxarch.h emitxarch.h hwintrinsiclistxarch.h hwintrinsic.h instrsxarch.h simdashwintrinsiclistxarch.h ) set( JIT_I386_HEADERS ${JIT_AMD64_HEADERS} ) set( JIT_ARM64_HEADERS emitarm64.h emitfmtsarm64.h hwintrinsiclistarm64.h instrsarm64.h registerarm64.h simdashwintrinsiclistarm64.h ) set( JIT_ARM_HEADERS emitarm.h emitfmtsarm.h instrsarm.h registerarm.h ) set ( JIT_ARMV6_HEADERS # Not supported as JIT target ) set ( JIT_S390X_HEADERS # Not supported as JIT target ) convert_to_absolute_path(JIT_SOURCES ${JIT_SOURCES}) convert_to_absolute_path(JIT_HEADERS ${JIT_HEADERS}) convert_to_absolute_path(JIT_RESOURCES ${JIT_RESOURCES}) # Also convert the per-architecture sources to absolute paths, if the subdirs want to use them. 
convert_to_absolute_path(JIT_AMD64_SOURCES ${JIT_AMD64_SOURCES}) convert_to_absolute_path(JIT_AMD64_HEADERS ${JIT_AMD64_HEADERS}) convert_to_absolute_path(JIT_ARM_SOURCES ${JIT_ARM_SOURCES}) convert_to_absolute_path(JIT_ARM_HEADERS ${JIT_ARM_HEADERS}) convert_to_absolute_path(JIT_I386_SOURCES ${JIT_I386_SOURCES}) convert_to_absolute_path(JIT_I386_HEADERS ${JIT_I386_HEADERS}) convert_to_absolute_path(JIT_ARM64_SOURCES ${JIT_ARM64_SOURCES}) convert_to_absolute_path(JIT_ARM64_HEADERS ${JIT_ARM64_HEADERS}) convert_to_absolute_path(JIT_ARMV6_SOURCES ${JIT_ARMV6_SOURCES}) convert_to_absolute_path(JIT_ARMV6_HEADERS ${JIT_ARMV6_HEADERS}) convert_to_absolute_path(JIT_S390X_SOURCES ${JIT_S390X_SOURCES}) convert_to_absolute_path(JIT_S390X_HEADERS ${JIT_S390X_HEADERS}) if(CLR_CMAKE_TARGET_ARCH_AMD64) set(JIT_ARCH_SOURCES ${JIT_AMD64_SOURCES}) set(JIT_ARCH_HEADERS ${JIT_AMD64_HEADERS}) elseif(CLR_CMAKE_TARGET_ARCH_ARM) set(JIT_ARCH_SOURCES ${JIT_ARM_SOURCES}) set(JIT_ARCH_HEADERS ${JIT_ARM_HEADERS}) elseif(CLR_CMAKE_TARGET_ARCH_ARMV6) set(JIT_ARCH_SOURCES ${JIT_ARMV6_SOURCES}) set(JIT_ARCH_HEADERS ${JIT_ARMV6_HEADERS}) elseif(CLR_CMAKE_TARGET_ARCH_I386) set(JIT_ARCH_SOURCES ${JIT_I386_SOURCES}) set(JIT_ARCH_HEADERS ${JIT_I386_HEADERS}) elseif(CLR_CMAKE_TARGET_ARCH_ARM64) set(JIT_ARCH_SOURCES ${JIT_ARM64_SOURCES}) set(JIT_ARCH_HEADERS ${JIT_ARM64_HEADERS}) elseif(CLR_CMAKE_TARGET_ARCH_S390X) set(JIT_ARCH_SOURCES ${JIT_S390X_SOURCES}) set(JIT_ARCH_HEADERS ${JIT_S390X_HEADERS}) else() clr_unknown_arch() endif() set(JIT_DLL_MAIN_FILE ${CMAKE_CURRENT_LIST_DIR}/dllmain.cpp) if(CLR_CMAKE_TARGET_WIN32) set(CLRJIT_EXPORTS ${CMAKE_CURRENT_LIST_DIR}/ClrJit.exports) set(JIT_EXPORTS_FILE ${CMAKE_CURRENT_BINARY_DIR}/ClrJit.exports.def) preprocess_file (${CLRJIT_EXPORTS} ${JIT_EXPORTS_FILE}) set(JIT_DEF_FILE ${JIT_EXPORTS_FILE}) else() set(CLRJIT_EXPORTS ${CMAKE_CURRENT_LIST_DIR}/ClrJit.PAL.exports) set(JIT_EXPORTS_FILE ${CMAKE_CURRENT_BINARY_DIR}/clrjit.exports) generate_exports_file(${CLRJIT_EXPORTS} ${JIT_EXPORTS_FILE}) if(CLR_CMAKE_TARGET_LINUX OR CLR_CMAKE_TARGET_FREEBSD OR CLR_CMAKE_TARGET_NETBSD OR CLR_CMAKE_TARGET_SUNOS) # This is required to force using our own PAL, not one that we are loaded with. set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Xlinker -Bsymbolic") endif() set_exports_linker_option(${JIT_EXPORTS_FILE}) set(JIT_EXPORTS_LINKER_OPTION ${EXPORTS_LINKER_OPTION}) endif() add_custom_target(jit_exports DEPENDS ${JIT_EXPORTS_FILE}) set(JIT_LINK_LIBRARIES utilcodestaticnohost ) set(JIT_ARCH_LINK_LIBRARIES gcinfo ) if(CLR_CMAKE_HOST_UNIX) list(APPEND JIT_LINK_LIBRARIES mscorrc coreclrpal palrt ) else() list(APPEND JIT_LINK_LIBRARIES ${STATIC_MT_CRT_LIB} ${STATIC_MT_VCRT_LIB} kernel32.lib advapi32.lib ole32.lib oleaut32.lib uuid.lib user32.lib version.lib shlwapi.lib bcrypt.lib crypt32.lib RuntimeObject.lib ) endif(CLR_CMAKE_HOST_UNIX) # Shared function for generating JIT # optional arguments: DESTINATIONS path function(add_jit jitName) set_source_files_properties(${JIT_EXPORTS_FILE} PROPERTIES GENERATED TRUE) if (CLR_CMAKE_TARGET_WIN32) # If generating for Visual Studio then include headers for a better # IDE experience. 
add_library_clr(${jitName} SHARED ${JIT_SOURCES} ${JIT_ARCH_SOURCES} ${JIT_HEADERS} ${JIT_ARCH_HEADERS} ${JIT_RESOURCES} ${JIT_DEF_FILE} ${JIT_DLL_MAIN_FILE} ) else() add_library_clr(${jitName} SHARED ${JIT_SOURCES} ${JIT_ARCH_SOURCES} ${JIT_RESOURCES} ${JIT_DEF_FILE} ${JIT_DLL_MAIN_FILE} ) endif(CLR_CMAKE_TARGET_WIN32) if(CLR_CMAKE_TARGET_WIN32) target_compile_definitions(${jitName} PRIVATE FX_VER_INTERNALNAME_STR=${jitName}.dll) endif(CLR_CMAKE_TARGET_WIN32) target_include_directories(${jitName} PRIVATE ${JIT_SOURCE_DIR}) target_precompile_headers(${jitName} PRIVATE "$<$<COMPILE_LANGUAGE:CXX>:jitpch.h>") add_dependencies(${jitName} jit_exports) set_property(TARGET ${jitName} APPEND_STRING PROPERTY LINK_FLAGS ${JIT_EXPORTS_LINKER_OPTION}) set_property(TARGET ${jitName} APPEND_STRING PROPERTY LINK_DEPENDS ${JIT_EXPORTS_FILE}) set_target_properties(${jitName} PROPERTIES MSVC_WARNING_LEVEL 4) target_link_libraries(${jitName} ${JIT_LINK_LIBRARIES} ${JIT_ARCH_LINK_LIBRARIES} ) if (CLR_CMAKE_HOST_WIN32) link_natvis_sources_for_target(${jitName} PRIVATE clrjit.natvis) endif() # add the install targets install_clr(TARGETS ${jitName} ${ARGN} COMPONENT alljits) endfunction() set(JIT_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) # Creates a static library "clrjit_static" to link into the VM. add_subdirectory(static) if (CLR_CMAKE_TARGET_OSX AND CLR_CMAKE_TARGET_ARCH_ARM64) set(TARGET_OS_NAME unix_osx) # Apple Arm64 has a special ABI, distinguish it. elseif (CLR_CMAKE_TARGET_UNIX) set(TARGET_OS_NAME unix) else() set(TARGET_OS_NAME win) endif() create_standalone_jit(TARGET clrjit OS ${TARGET_OS_NAME} ARCH ${ARCH_TARGET_NAME} DESTINATIONS . sharedFramework) install_clr(TARGETS clrjit DESTINATIONS . sharedFramework COMPONENT jit) # Enable profile guided optimization add_pgo(clrjit) if (CLR_CMAKE_TARGET_ARCH_ARM64 OR CLR_CMAKE_TARGET_ARCH_AMD64) create_standalone_jit(TARGET clrjit_universal_arm64_${ARCH_HOST_NAME} OS universal ARCH arm64 DESTINATIONS .) create_standalone_jit(TARGET clrjit_unix_x64_${ARCH_HOST_NAME} OS unix ARCH x64 DESTINATIONS .) create_standalone_jit(TARGET clrjit_win_x64_${ARCH_HOST_NAME} OS win ARCH x64 DESTINATIONS .) endif (CLR_CMAKE_TARGET_ARCH_ARM64 OR CLR_CMAKE_TARGET_ARCH_AMD64) create_standalone_jit(TARGET clrjit_universal_arm_${ARCH_HOST_NAME} OS universal ARCH arm DESTINATIONS .) target_compile_definitions(clrjit_universal_arm_${ARCH_HOST_NAME} PRIVATE ARM_SOFTFP CONFIGURABLE_ARM_ABI) create_standalone_jit(TARGET clrjit_win_x86_${ARCH_HOST_NAME} OS win ARCH x86 DESTINATIONS .) if (CLR_CMAKE_TARGET_ARCH_I386 AND CLR_CMAKE_TARGET_UNIX) create_standalone_jit(TARGET clrjit_unix_x86_${ARCH_HOST_NAME} OS unix ARCH x86 DESTINATIONS .) endif (CLR_CMAKE_TARGET_ARCH_I386 AND CLR_CMAKE_TARGET_UNIX) if (CLR_CMAKE_TARGET_UNIX) if (NOT ARCH_TARGET_NAME STREQUAL s390x AND NOT ARCH_TARGET_NAME STREQUAL armv6) if(CLR_CMAKE_TARGET_ARCH_ARM OR CLR_CMAKE_TARGET_ARCH_ARM64) install_clr(TARGETS clrjit_universal_${ARCH_TARGET_NAME}_${ARCH_HOST_NAME} DESTINATIONS . COMPONENT jit) else() install_clr(TARGETS clrjit_unix_${ARCH_TARGET_NAME}_${ARCH_HOST_NAME} DESTINATIONS . 
COMPONENT jit) endif() endif(NOT ARCH_TARGET_NAME STREQUAL s390x AND NOT ARCH_TARGET_NAME STREQUAL armv6) endif() if (CLR_CMAKE_TARGET_WIN32 AND CLR_CMAKE_PGO_INSTRUMENT) # Copy PGO dependency to target dir set(PGORT_DLL "pgort140.dll") find_path(PGORT_DIR ${PGORT_DLL} REQUIRED) install(FILES "${PGORT_DIR}/${PGORT_DLL}" DESTINATION ${CMAKE_INSTALL_PREFIX}) install(FILES "${PGORT_DIR}/${PGORT_DLL}" DESTINATION ${CMAKE_INSTALL_PREFIX}/sharedFramework) endif ()
set(CMAKE_INCLUDE_CURRENT_DIR ON) include_directories("./jitstd") include_directories("../inc") # gcc 10 and lower versions are too sensitive to bit fields width and warn from core compiler. # Since there is no better / specific suppression available for these core warnings, we disable # warn-as-error (-Werror) for JIT in this case. This issue has been fixed in gcc 11. # See https://github.com/dotnet/runtime/issues/33541 if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 11.0) add_compile_options(-Wno-error) endif() function(create_standalone_jit) set(oneValueArgs TARGET OS ARCH) set(multiValueArgs DESTINATIONS) cmake_parse_arguments(TARGETDETAILS "" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) if(TARGETDETAILS_OS STREQUAL "unix_osx") if (NOT (TARGETDETAILS_ARCH STREQUAL "arm64")) message(FATAL_ERROR "Only arm64 Apple has a special ABI, use just unix for x64 Mac OS." ) endif() set(JIT_ARCH_LINK_LIBRARIES gcinfo_unix_arm64) else() set(JIT_ARCH_LINK_LIBRARIES gcinfo_${TARGETDETAILS_OS}_${TARGETDETAILS_ARCH}) endif() if(TARGETDETAILS_ARCH STREQUAL "x64") set(JIT_ARCH_SOURCES ${JIT_AMD64_SOURCES}) set(JIT_ARCH_HEADERS ${JIT_AMD64_HEADERS}) elseif((TARGETDETAILS_ARCH STREQUAL "arm") OR (TARGETDETAILS_ARCH STREQUAL "armel")) set(JIT_ARCH_SOURCES ${JIT_ARM_SOURCES}) set(JIT_ARCH_HEADERS ${JIT_ARM_HEADERS}) elseif((TARGETDETAILS_ARCH STREQUAL "armv6") OR (TARGETDETAILS_ARCH STREQUAL "armv6l")) set(JIT_ARCH_SOURCES ${JIT_ARMV6_SOURCES}) set(JIT_ARCH_HEADERS ${JIT_ARMV6_HEADERS}) elseif(TARGETDETAILS_ARCH STREQUAL "x86") set(JIT_ARCH_SOURCES ${JIT_I386_SOURCES}) set(JIT_ARCH_HEADERS ${JIT_I386_HEADERS}) elseif(TARGETDETAILS_ARCH STREQUAL "arm64") set(JIT_ARCH_SOURCES ${JIT_ARM64_SOURCES}) set(JIT_ARCH_HEADERS ${JIT_ARM64_HEADERS}) elseif(TARGETDETAILS_ARCH STREQUAL "s390x") set(JIT_ARCH_SOURCES ${JIT_S390X_SOURCES}) set(JIT_ARCH_HEADERS ${JIT_S390X_HEADERS}) else() clr_unknown_arch() endif() if (TARGETDETAILS_DESTINATIONS STREQUAL "") add_jit(${TARGETDETAILS_TARGET}) else() add_jit(${TARGETDETAILS_TARGET} DESTINATIONS "${TARGETDETAILS_DESTINATIONS}") endif() set_target_definitions_to_custom_os_and_arch(${ARGN}) set_target_properties(${TARGETDETAILS_TARGET} PROPERTIES IGNORE_FEATURE_MERGE_JIT_AND_ENGINE TRUE) target_compile_definitions(${TARGETDETAILS_TARGET} PRIVATE FEATURE_NO_HOST) target_compile_definitions(${TARGETDETAILS_TARGET} PRIVATE SELF_NO_HOST) if ((TARGETDETAILS_ARCH STREQUAL "x64") OR (TARGETDETAILS_ARCH STREQUAL "arm64") OR ((TARGETDETAILS_ARCH STREQUAL "x86") AND NOT (TARGETDETAILS_OS STREQUAL "unix"))) target_compile_definitions(${TARGETDETAILS_TARGET} PRIVATE FEATURE_SIMD) target_compile_definitions(${TARGETDETAILS_TARGET} PRIVATE FEATURE_HW_INTRINSICS) endif () endfunction() if (CLR_CMAKE_TARGET_ARCH_AMD64 OR CLR_CMAKE_TARGET_ARCH_ARM64 OR (CLR_CMAKE_TARGET_ARCH_I386 AND NOT CLR_CMAKE_HOST_UNIX)) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_ARCH>>>:FEATURE_SIMD>) add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:IGNORE_DEFAULT_TARGET_ARCH>>>:FEATURE_HW_INTRINSICS>) endif () # JIT_BUILD disables certain PAL_TRY debugging features add_definitions(-DJIT_BUILD) if(CLR_CMAKE_TARGET_WIN32) set(JIT_RESOURCES Native.rc) endif(CLR_CMAKE_TARGET_WIN32) set( JIT_SOURCES alloc.cpp assertionprop.cpp bitset.cpp block.cpp layout.cpp codegencommon.cpp codegenlinear.cpp compiler.cpp copyprop.cpp debuginfo.cpp disasm.cpp earlyprop.cpp ee_il_dll.cpp eeinterface.cpp emit.cpp error.cpp fgbasic.cpp fgdiagnostic.cpp fgehopt.cpp 
fgflow.cpp fginline.cpp fgopt.cpp fgprofile.cpp fgstmt.cpp flowgraph.cpp forwardsub.cpp gcdecode.cpp gcencode.cpp gcinfo.cpp gentree.cpp gschecks.cpp hashbv.cpp hwintrinsic.cpp hostallocator.cpp indirectcalltransformer.cpp importer.cpp importer_vectorization.cpp inline.cpp inlinepolicy.cpp instr.cpp jitconfig.cpp jiteh.cpp jithashtable.cpp jittelemetry.cpp lclmorph.cpp lclvars.cpp likelyclass.cpp lir.cpp liveness.cpp loopcloning.cpp lower.cpp lsra.cpp lsrabuild.cpp morph.cpp morphblock.cpp objectalloc.cpp optcse.cpp optimizer.cpp patchpoint.cpp phase.cpp rangecheck.cpp rationalize.cpp redundantbranchopts.cpp regalloc.cpp register_arg_convention.cpp regset.cpp scopeinfo.cpp sideeffects.cpp sm.cpp smdata.cpp smweights.cpp ssabuilder.cpp ssarenamestate.cpp stacklevelsetter.cpp treelifeupdater.cpp typeinfo.cpp unwind.cpp utils.cpp valuenum.cpp ) if (CLR_CMAKE_TARGET_WIN32) # Append clrjit.natvis file list (APPEND JIT_SOURCES clrjit.natvis) endif(CLR_CMAKE_TARGET_WIN32) # Define all the architecture-specific source files set( JIT_AMD64_SOURCES codegenxarch.cpp emitxarch.cpp lowerxarch.cpp lsraxarch.cpp simd.cpp simdashwintrinsic.cpp simdcodegenxarch.cpp targetamd64.cpp unwindamd64.cpp hwintrinsicxarch.cpp hwintrinsiccodegenxarch.cpp ) set( JIT_ARM_SOURCES codegenarmarch.cpp codegenarm.cpp decomposelongs.cpp emitarm.cpp lowerarmarch.cpp lsraarmarch.cpp lsraarm.cpp targetarm.cpp unwindarm.cpp ) set( JIT_I386_SOURCES codegenxarch.cpp decomposelongs.cpp emitxarch.cpp lowerxarch.cpp lsraxarch.cpp simd.cpp simdashwintrinsic.cpp simdcodegenxarch.cpp targetx86.cpp unwindx86.cpp hwintrinsicxarch.cpp hwintrinsiccodegenxarch.cpp ) set( JIT_ARM64_SOURCES codegenarmarch.cpp codegenarm64.cpp emitarm64.cpp lowerarmarch.cpp lsraarmarch.cpp lsraarm64.cpp simd.cpp simdashwintrinsic.cpp targetarm64.cpp unwindarm.cpp unwindarm64.cpp hwintrinsicarm64.cpp hwintrinsiccodegenarm64.cpp ) set( JIT_ARMV6_SOURCES # Not supported as JIT target ) set( JIT_S390X_SOURCES # Not supported as JIT target ) # We include the headers here for better experience in IDEs. 
set( JIT_HEADERS ../inc/corinfo.h ../inc/corjit.h ../inc/corjitflags.h ../inc/corjithost.h _typeinfo.h alloc.h arraystack.h bitset.h layout.h bitsetasshortlong.h bitsetasuint64.h bitsetasuint64inclass.h bitsetops.h bitvec.h block.h blockset.h codegen.h codegeninterface.h compiler.h compiler.hpp compilerbitsettraits.h compilerbitsettraits.hpp compmemkind.h compphases.h dataflow.h debuginfo.h decomposelongs.h disasm.h emit.h emitdef.h emitfmts.h emitinl.h emitjmps.h emitpub.h error.h gentree.h gtlist.h gtstructs.h hashbv.h host.h hostallocator.h hwintrinsic.h ICorJitInfo_API_names.h ICorJitInfo_API_wrapper.hpp inline.h inlinepolicy.h instr.h instrs.h jit.h jitconfig.h jitconfigvalues.h jitee.h jiteh.h jitexpandarray.h jitgcinfo.h jithashtable.h jitpch.h jitstd.h jittelemetry.h lir.h loopcloning.h loopcloningopts.h lower.h lsra_reftypes.h lsra_stats.h lsra_score.h lsra.h namedintrinsiclist.h objectalloc.h opcode.h phase.h rangecheck.h rationalize.h regalloc.h register_arg_convention.h register.h regset.h sideeffects.h simd.h simdashwintrinsic.h simdintrinsiclist.h sm.h smallhash.h smcommon.h smopenum.h ssabuilder.h ssaconfig.h ssarenamestate.h stacklevelsetter.h target.h targetx86.h targetamd64.h targetarm.h targetarm64.h tinyarray.h titypes.h treelifeupdater.h typelist.h unwind.h utils.h valuenum.h valuenumfuncs.h valuenumtype.h varset.h vartype.h vartypesdef.h ) # Arch specific headers set( JIT_AMD64_HEADERS emitfmtsxarch.h emitxarch.h hwintrinsiclistxarch.h hwintrinsic.h instrsxarch.h simdashwintrinsiclistxarch.h ) set( JIT_I386_HEADERS ${JIT_AMD64_HEADERS} ) set( JIT_ARM64_HEADERS emitarm64.h emitfmtsarm64.h hwintrinsiclistarm64.h instrsarm64.h registerarm64.h simdashwintrinsiclistarm64.h ) set( JIT_ARM_HEADERS emitarm.h emitfmtsarm.h instrsarm.h registerarm.h ) set ( JIT_ARMV6_HEADERS # Not supported as JIT target ) set ( JIT_S390X_HEADERS # Not supported as JIT target ) convert_to_absolute_path(JIT_SOURCES ${JIT_SOURCES}) convert_to_absolute_path(JIT_HEADERS ${JIT_HEADERS}) convert_to_absolute_path(JIT_RESOURCES ${JIT_RESOURCES}) # Also convert the per-architecture sources to absolute paths, if the subdirs want to use them. 
convert_to_absolute_path(JIT_AMD64_SOURCES ${JIT_AMD64_SOURCES}) convert_to_absolute_path(JIT_AMD64_HEADERS ${JIT_AMD64_HEADERS}) convert_to_absolute_path(JIT_ARM_SOURCES ${JIT_ARM_SOURCES}) convert_to_absolute_path(JIT_ARM_HEADERS ${JIT_ARM_HEADERS}) convert_to_absolute_path(JIT_I386_SOURCES ${JIT_I386_SOURCES}) convert_to_absolute_path(JIT_I386_HEADERS ${JIT_I386_HEADERS}) convert_to_absolute_path(JIT_ARM64_SOURCES ${JIT_ARM64_SOURCES}) convert_to_absolute_path(JIT_ARM64_HEADERS ${JIT_ARM64_HEADERS}) convert_to_absolute_path(JIT_ARMV6_SOURCES ${JIT_ARMV6_SOURCES}) convert_to_absolute_path(JIT_ARMV6_HEADERS ${JIT_ARMV6_HEADERS}) convert_to_absolute_path(JIT_S390X_SOURCES ${JIT_S390X_SOURCES}) convert_to_absolute_path(JIT_S390X_HEADERS ${JIT_S390X_HEADERS}) if(CLR_CMAKE_TARGET_ARCH_AMD64) set(JIT_ARCH_SOURCES ${JIT_AMD64_SOURCES}) set(JIT_ARCH_HEADERS ${JIT_AMD64_HEADERS}) elseif(CLR_CMAKE_TARGET_ARCH_ARM) set(JIT_ARCH_SOURCES ${JIT_ARM_SOURCES}) set(JIT_ARCH_HEADERS ${JIT_ARM_HEADERS}) elseif(CLR_CMAKE_TARGET_ARCH_ARMV6) set(JIT_ARCH_SOURCES ${JIT_ARMV6_SOURCES}) set(JIT_ARCH_HEADERS ${JIT_ARMV6_HEADERS}) elseif(CLR_CMAKE_TARGET_ARCH_I386) set(JIT_ARCH_SOURCES ${JIT_I386_SOURCES}) set(JIT_ARCH_HEADERS ${JIT_I386_HEADERS}) elseif(CLR_CMAKE_TARGET_ARCH_ARM64) set(JIT_ARCH_SOURCES ${JIT_ARM64_SOURCES}) set(JIT_ARCH_HEADERS ${JIT_ARM64_HEADERS}) elseif(CLR_CMAKE_TARGET_ARCH_S390X) set(JIT_ARCH_SOURCES ${JIT_S390X_SOURCES}) set(JIT_ARCH_HEADERS ${JIT_S390X_HEADERS}) else() clr_unknown_arch() endif() set(JIT_DLL_MAIN_FILE ${CMAKE_CURRENT_LIST_DIR}/dllmain.cpp) if(CLR_CMAKE_TARGET_WIN32) set(CLRJIT_EXPORTS ${CMAKE_CURRENT_LIST_DIR}/ClrJit.exports) set(JIT_EXPORTS_FILE ${CMAKE_CURRENT_BINARY_DIR}/ClrJit.exports.def) preprocess_file (${CLRJIT_EXPORTS} ${JIT_EXPORTS_FILE}) set(JIT_DEF_FILE ${JIT_EXPORTS_FILE}) else() set(CLRJIT_EXPORTS ${CMAKE_CURRENT_LIST_DIR}/ClrJit.PAL.exports) set(JIT_EXPORTS_FILE ${CMAKE_CURRENT_BINARY_DIR}/clrjit.exports) generate_exports_file(${CLRJIT_EXPORTS} ${JIT_EXPORTS_FILE}) if(CLR_CMAKE_TARGET_LINUX OR CLR_CMAKE_TARGET_FREEBSD OR CLR_CMAKE_TARGET_NETBSD OR CLR_CMAKE_TARGET_SUNOS) # This is required to force using our own PAL, not one that we are loaded with. set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Xlinker -Bsymbolic") endif() set_exports_linker_option(${JIT_EXPORTS_FILE}) set(JIT_EXPORTS_LINKER_OPTION ${EXPORTS_LINKER_OPTION}) endif() add_custom_target(jit_exports DEPENDS ${JIT_EXPORTS_FILE}) set(JIT_LINK_LIBRARIES utilcodestaticnohost ) set(JIT_ARCH_LINK_LIBRARIES gcinfo ) if(CLR_CMAKE_HOST_UNIX) list(APPEND JIT_LINK_LIBRARIES mscorrc coreclrpal palrt ) else() list(APPEND JIT_LINK_LIBRARIES ${STATIC_MT_CRT_LIB} ${STATIC_MT_VCRT_LIB} kernel32.lib advapi32.lib ole32.lib oleaut32.lib uuid.lib user32.lib version.lib shlwapi.lib bcrypt.lib crypt32.lib RuntimeObject.lib ) endif(CLR_CMAKE_HOST_UNIX) # Shared function for generating JIT # optional arguments: DESTINATIONS path function(add_jit jitName) set_source_files_properties(${JIT_EXPORTS_FILE} PROPERTIES GENERATED TRUE) if (CLR_CMAKE_TARGET_WIN32) # If generating for Visual Studio then include headers for a better # IDE experience. 
add_library_clr(${jitName} SHARED ${JIT_SOURCES} ${JIT_ARCH_SOURCES} ${JIT_HEADERS} ${JIT_ARCH_HEADERS} ${JIT_RESOURCES} ${JIT_DEF_FILE} ${JIT_DLL_MAIN_FILE} ) else() add_library_clr(${jitName} SHARED ${JIT_SOURCES} ${JIT_ARCH_SOURCES} ${JIT_RESOURCES} ${JIT_DEF_FILE} ${JIT_DLL_MAIN_FILE} ) endif(CLR_CMAKE_TARGET_WIN32) if(CLR_CMAKE_TARGET_WIN32) target_compile_definitions(${jitName} PRIVATE FX_VER_INTERNALNAME_STR=${jitName}.dll) endif(CLR_CMAKE_TARGET_WIN32) target_include_directories(${jitName} PRIVATE ${JIT_SOURCE_DIR}) target_precompile_headers(${jitName} PRIVATE "$<$<COMPILE_LANGUAGE:CXX>:jitpch.h>") add_dependencies(${jitName} jit_exports) set_property(TARGET ${jitName} APPEND_STRING PROPERTY LINK_FLAGS ${JIT_EXPORTS_LINKER_OPTION}) set_property(TARGET ${jitName} APPEND_STRING PROPERTY LINK_DEPENDS ${JIT_EXPORTS_FILE}) target_link_libraries(${jitName} ${JIT_LINK_LIBRARIES} ${JIT_ARCH_LINK_LIBRARIES} ) if (CLR_CMAKE_HOST_WIN32) link_natvis_sources_for_target(${jitName} PRIVATE clrjit.natvis) endif() # add the install targets install_clr(TARGETS ${jitName} ${ARGN} COMPONENT alljits) endfunction() set(JIT_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) # Creates a static library "clrjit_static" to link into the VM. add_subdirectory(static) if (CLR_CMAKE_TARGET_OSX AND CLR_CMAKE_TARGET_ARCH_ARM64) set(TARGET_OS_NAME unix_osx) # Apple Arm64 has a special ABI, distinguish it. elseif (CLR_CMAKE_TARGET_UNIX) set(TARGET_OS_NAME unix) else() set(TARGET_OS_NAME win) endif() create_standalone_jit(TARGET clrjit OS ${TARGET_OS_NAME} ARCH ${ARCH_TARGET_NAME} DESTINATIONS . sharedFramework) install_clr(TARGETS clrjit DESTINATIONS . sharedFramework COMPONENT jit) # Enable profile guided optimization add_pgo(clrjit) if (CLR_CMAKE_TARGET_ARCH_ARM64 OR CLR_CMAKE_TARGET_ARCH_AMD64) create_standalone_jit(TARGET clrjit_universal_arm64_${ARCH_HOST_NAME} OS universal ARCH arm64 DESTINATIONS .) create_standalone_jit(TARGET clrjit_unix_x64_${ARCH_HOST_NAME} OS unix ARCH x64 DESTINATIONS .) create_standalone_jit(TARGET clrjit_win_x64_${ARCH_HOST_NAME} OS win ARCH x64 DESTINATIONS .) endif (CLR_CMAKE_TARGET_ARCH_ARM64 OR CLR_CMAKE_TARGET_ARCH_AMD64) create_standalone_jit(TARGET clrjit_universal_arm_${ARCH_HOST_NAME} OS universal ARCH arm DESTINATIONS .) target_compile_definitions(clrjit_universal_arm_${ARCH_HOST_NAME} PRIVATE ARM_SOFTFP CONFIGURABLE_ARM_ABI) create_standalone_jit(TARGET clrjit_win_x86_${ARCH_HOST_NAME} OS win ARCH x86 DESTINATIONS .) if (CLR_CMAKE_TARGET_ARCH_I386 AND CLR_CMAKE_TARGET_UNIX) create_standalone_jit(TARGET clrjit_unix_x86_${ARCH_HOST_NAME} OS unix ARCH x86 DESTINATIONS .) endif (CLR_CMAKE_TARGET_ARCH_I386 AND CLR_CMAKE_TARGET_UNIX) if (CLR_CMAKE_TARGET_UNIX) if (NOT ARCH_TARGET_NAME STREQUAL s390x AND NOT ARCH_TARGET_NAME STREQUAL armv6) if(CLR_CMAKE_TARGET_ARCH_ARM OR CLR_CMAKE_TARGET_ARCH_ARM64) install_clr(TARGETS clrjit_universal_${ARCH_TARGET_NAME}_${ARCH_HOST_NAME} DESTINATIONS . COMPONENT jit) else() install_clr(TARGETS clrjit_unix_${ARCH_TARGET_NAME}_${ARCH_HOST_NAME} DESTINATIONS . COMPONENT jit) endif() endif(NOT ARCH_TARGET_NAME STREQUAL s390x AND NOT ARCH_TARGET_NAME STREQUAL armv6) endif() if (CLR_CMAKE_TARGET_WIN32 AND CLR_CMAKE_PGO_INSTRUMENT) # Copy PGO dependency to target dir set(PGORT_DLL "pgort140.dll") find_path(PGORT_DIR ${PGORT_DLL} REQUIRED) install(FILES "${PGORT_DIR}/${PGORT_DLL}" DESTINATION ${CMAKE_INSTALL_PREFIX}) install(FILES "${PGORT_DIR}/${PGORT_DLL}" DESTINATION ${CMAKE_INSTALL_PREFIX}/sharedFramework) endif ()
1
dotnet/runtime
66,474
Update repo CMake configuration to target /W4 by default
Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
AaronRobinsonMSFT
2022-03-10T20:20:31Z
2022-03-11T04:59:21Z
ba5207b0f4054fe8963fc3e87c2bd32907516961
5ce2b9f860f7a84c3059650bb67817d59d8f4953
Update repo CMake configuration to target /W4 by default. Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
./src/coreclr/vm/object.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // OBJECT.CPP // // Definitions of a Com+ Object // #include "common.h" #include "vars.hpp" #include "class.h" #include "object.h" #include "threads.h" #include "excep.h" #include "eeconfig.h" #include "gcheaputilities.h" #include "field.h" #include "argdestination.h" SVAL_IMPL(INT32, ArrayBase, s_arrayBoundsZero); // follow the necessary rules to get a new valid hashcode for an object DWORD Object::ComputeHashCode() { DWORD hashCode; // note that this algorithm now uses at most HASHCODE_BITS so that it will // fit into the objheader if the hashcode has to be moved back into the objheader // such as for an object that is being frozen do { // we use the high order bits in this case because they're more random hashCode = GetThread()->GetNewHashCode() >> (32-HASHCODE_BITS); } while (hashCode == 0); // need to enforce hashCode != 0 // verify that it really fits into HASHCODE_BITS _ASSERTE((hashCode & ((1<<HASHCODE_BITS)-1)) == hashCode); return hashCode; } #ifndef DACCESS_COMPILE INT32 Object::GetHashCodeEx() { CONTRACTL { MODE_COOPERATIVE; THROWS; GC_NOTRIGGER; } CONTRACTL_END // This loop exists because we're inspecting the header dword of the object // and it may change under us because of races with other threads. // On top of that, it may have the spin lock bit set, in which case we're // not supposed to change it. // In all of these case, we need to retry the operation. DWORD iter = 0; DWORD dwSwitchCount = 0; while (true) { DWORD bits = GetHeader()->GetBits(); if (bits & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX) { if (bits & BIT_SBLK_IS_HASHCODE) { // Common case: the object already has a hash code return bits & MASK_HASHCODE; } else { // We have a sync block index. This means if we already have a hash code, // it is in the sync block, otherwise we generate a new one and store it there SyncBlock *psb = GetSyncBlock(); DWORD hashCode = psb->GetHashCode(); if (hashCode != 0) return hashCode; hashCode = ComputeHashCode(); return psb->SetHashCode(hashCode); } } else { // If a thread is holding the thin lock we need a syncblock if ((bits & (SBLK_MASK_LOCK_THREADID)) != 0) { GetSyncBlock(); // No need to replicate the above code dealing with sync blocks // here - in the next iteration of the loop, we'll realize // we have a syncblock, and we'll do the right thing. } else { // We want to change the header in this case, so we have to check the BIT_SBLK_SPIN_LOCK bit first if (bits & BIT_SBLK_SPIN_LOCK) { iter++; if ((iter % 1024) != 0 && g_SystemInfo.dwNumberOfProcessors > 1) { YieldProcessorNormalized(); // indicate to the processor that we are spinning } else { __SwitchToThread(0, ++dwSwitchCount); } continue; } DWORD hashCode = ComputeHashCode(); DWORD newBits = bits | BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | BIT_SBLK_IS_HASHCODE | hashCode; if (GetHeader()->SetBits(newBits, bits) == bits) return hashCode; // Header changed under us - let's restart this whole thing. } } } } #endif // #ifndef DACCESS_COMPILE BOOL Object::ValidateObjectWithPossibleAV() { CANNOT_HAVE_CONTRACT; SUPPORTS_DAC; return GetGCSafeMethodTable()->ValidateWithPossibleAV(); } #ifndef DACCESS_COMPILE // There are cases where it is not possible to get a type handle during a GC. // If we can get the type handle, this method will return it. // Otherwise, the method will return NULL. 
TypeHandle Object::GetGCSafeTypeHandleIfPossible() const { CONTRACTL { NOTHROW; GC_NOTRIGGER; if(!IsGCThread()) { MODE_COOPERATIVE; } } CONTRACTL_END; // Although getting the type handle is unsafe and could cause recursive type lookups // in some cases, it's always safe and straightforward to get to the MethodTable. MethodTable * pMT = GetGCSafeMethodTable(); _ASSERTE(pMT != NULL); if (pMT == g_pFreeObjectMethodTable) { return NULL; } // Don't look at types that belong to an unloading AppDomain, or else // pObj->GetGCSafeTypeHandle() can AV. For example, we encountered this AV when pObj // was an array like this: // // MyValueType1<MyValueType2>[] myArray // // where MyValueType1<T> & MyValueType2 are defined in different assemblies. In such // a case, looking up the type handle for myArray requires looking in // MyValueType1<T>'s module's m_AssemblyRefByNameTable, which is garbage if its // AppDomain is unloading. // // Another AV was encountered in a similar case, // // MyRefType1<MyRefType2>[] myArray // // where MyRefType2's module was unloaded by the time the GC occurred. In at least // one case, the GC was caused by the AD unload itself (AppDomain::Unload -> // AppDomain::Exit -> GCInterface::AddMemoryPressure -> WKS::GCHeapUtilities::GarbageCollect). // // To protect against all scenarios, verify that // // * The MT of the object is not getting unloaded, OR // * In the case of arrays (potentially of arrays of arrays of arrays ...), the // MT of the innermost element is not getting unloaded. This then ensures the // MT of the original object (i.e., array) itself must not be getting // unloaded either, since the MTs of arrays and of their elements are // allocated on the same loader allocator. Module * pLoaderModule = pMT->GetLoaderModule(); // Don't look up types that are unloading due to Collectible Assemblies. Haven't been // able to find a case where we actually encounter objects like this that can cause // problems; however, it seems prudent to add this protection just in case. LoaderAllocator * pLoaderAllocator = pLoaderModule->GetLoaderAllocator(); _ASSERTE(pLoaderAllocator != NULL); if ((pLoaderAllocator->IsCollectible()) && (ObjectHandleIsNull(pLoaderAllocator->GetLoaderAllocatorObjectHandle()))) { return NULL; } // Ok, it should now be safe to get the type handle return GetGCSafeTypeHandle(); } /* static */ BOOL Object::SupportsInterface(OBJECTREF pObj, MethodTable* pInterfaceMT) { CONTRACTL { THROWS; GC_TRIGGERS; INJECT_FAULT(COMPlusThrowOM()); PRECONDITION(CheckPointer(pInterfaceMT)); PRECONDITION(pObj->GetMethodTable()->IsRestored_NoLogging()); PRECONDITION(pInterfaceMT->IsInterface()); } CONTRACTL_END BOOL bSupportsItf = FALSE; GCPROTECT_BEGIN(pObj) { // Make sure the interface method table has been restored. pInterfaceMT->CheckRestore(); // Check to see if the static class definition indicates we implement the interface. MethodTable * pMT = pObj->GetMethodTable(); if (pMT->CanCastToInterface(pInterfaceMT)) { bSupportsItf = TRUE; } #ifdef FEATURE_COMINTEROP else if (pMT->IsComObjectType()) { // If this is a COM object, the static class definition might not be complete so we need // to check if the COM object implements the interface. 
bSupportsItf = ComObject::SupportsInterface(pObj, pInterfaceMT); } #endif // FEATURE_COMINTEROP } GCPROTECT_END(); return bSupportsItf; } Assembly *AssemblyBaseObject::GetAssembly() { WRAPPER_NO_CONTRACT; return m_pAssembly->GetAssembly(); } STRINGREF AllocateString(SString sstr) { CONTRACTL { THROWS; GC_TRIGGERS; } CONTRACTL_END; COUNT_T length = sstr.GetCount(); // count of WCHARs excluding terminating NULL STRINGREF strObj = AllocateString(length); memcpyNoGCRefs(strObj->GetBuffer(), sstr.GetUnicode(), length*sizeof(WCHAR)); return strObj; } CHARARRAYREF AllocateCharArray(DWORD dwArrayLength) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; return (CHARARRAYREF)AllocatePrimitiveArray(ELEMENT_TYPE_CHAR, dwArrayLength); } void Object::ValidateHeap(BOOL bDeep) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; #if defined (VERIFY_HEAP) //no need to verify next object's header in this case //since this is called in verify_heap, which will verfiy every object anyway Validate(bDeep, FALSE); #endif } void Object::SetOffsetObjectRef(DWORD dwOffset, size_t dwValue) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; STATIC_CONTRACT_MODE_COOPERATIVE; OBJECTREF* location; OBJECTREF o; location = (OBJECTREF *) &GetData()[dwOffset]; o = ObjectToOBJECTREF(*(Object **) &dwValue); SetObjectReference( location, o ); } void SetObjectReferenceUnchecked(OBJECTREF *dst,OBJECTREF ref) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; STATIC_CONTRACT_MODE_COOPERATIVE; STATIC_CONTRACT_CANNOT_TAKE_LOCK; // Assign value. We use casting to avoid going thru the overloaded // OBJECTREF= operator which in this case would trigger a false // write-barrier violation assert. VolatileStore((Object**)dst, OBJECTREFToObject(ref)); #ifdef _DEBUG Thread::ObjectRefAssign(dst); #endif ErectWriteBarrier(dst, ref); } void STDCALL CopyValueClassUnchecked(void* dest, void* src, MethodTable *pMT) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; STATIC_CONTRACT_MODE_COOPERATIVE; _ASSERTE(!pMT->IsArray()); // bunch of assumptions about arrays wrong. if (pMT->ContainsPointers()) { memmoveGCRefs(dest, src, pMT->GetNumInstanceFieldBytes()); } else { switch (pMT->GetNumInstanceFieldBytes()) { case 1: *(UINT8*)dest = *(UINT8*)src; break; #ifndef ALIGN_ACCESS // we can hit an alignment fault if the value type has multiple // smaller fields. Example: if there are two I4 fields, the // value class can be aligned to 4-byte boundaries, yet the // NumInstanceFieldBytes is 8 case 2: *(UINT16*)dest = *(UINT16*)src; break; case 4: *(UINT32*)dest = *(UINT32*)src; break; case 8: *(UINT64*)dest = *(UINT64*)src; break; #endif // !ALIGN_ACCESS default: memcpyNoGCRefs(dest, src, pMT->GetNumInstanceFieldBytes()); break; } } } // Copy value class into the argument specified by the argDest. 
// The destOffset is nonzero when copying values into Nullable<T>, it is the offset // of the T value inside of the Nullable<T> void STDCALL CopyValueClassArgUnchecked(ArgDestination *argDest, void* src, MethodTable *pMT, int destOffset) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; STATIC_CONTRACT_MODE_COOPERATIVE; #if defined(UNIX_AMD64_ABI) if (argDest->IsStructPassedInRegs()) { argDest->CopyStructToRegisters(src, pMT->GetNumInstanceFieldBytes(), destOffset); return; } #elif defined(TARGET_ARM64) if (argDest->IsHFA()) { argDest->CopyHFAStructToRegister(src, pMT->GetNumInstanceFieldBytes()); return; } #endif // UNIX_AMD64_ABI // destOffset is only valid for Nullable<T> passed in registers _ASSERTE(destOffset == 0); CopyValueClassUnchecked(argDest->GetDestinationAddress(), src, pMT); } // Initialize the value class argument to zeros void InitValueClassArg(ArgDestination *argDest, MethodTable *pMT) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; STATIC_CONTRACT_MODE_COOPERATIVE; #if defined(UNIX_AMD64_ABI) if (argDest->IsStructPassedInRegs()) { argDest->ZeroStructInRegisters(pMT->GetNumInstanceFieldBytes()); return; } #endif InitValueClass(argDest->GetDestinationAddress(), pMT); } #if defined (VERIFY_HEAP) #include "dbginterface.h" // make the checking code goes as fast as possible! #if defined(_MSC_VER) #pragma optimize("tgy", on) #endif #define CREATE_CHECK_STRING(x) #x #define CHECK_AND_TEAR_DOWN(x) \ do{ \ if (!(x)) \ { \ _ASSERTE(!CREATE_CHECK_STRING(x)); \ EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE); \ } \ } while (0) VOID Object::Validate(BOOL bDeep, BOOL bVerifyNextHeader, BOOL bVerifySyncBlock) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; STATIC_CONTRACT_MODE_COOPERATIVE; STATIC_CONTRACT_CANNOT_TAKE_LOCK; if (g_IBCLogger.InstrEnabled() && !GCStress<cfg_any>::IsEnabled()) { // If we are instrumenting for IBC (and GCStress is not enabled) // then skip these Object::Validate() as they slow down the // instrument phase by an order of magnitude return; } if (g_fEEShutDown & ShutDown_Phase2) { // During second phase of shutdown the code below is not guaranteed to work. return; } #ifdef _DEBUG { Thread *pThread = GetThreadNULLOk(); if (pThread != NULL && !(pThread->PreemptiveGCDisabled())) { // Debugger helper threads are special in that they take over for // what would normally be a nonEE thread (the RCThread). If an // EE thread is doing RCThread duty, then it should be treated // as such. // // There are some GC threads in the same kind of category. Note that // GetThread() sometimes returns them, if DLL_THREAD_ATTACH notifications // have run some managed code. if (!dbgOnly_IsSpecialEEThread() && !IsGCSpecialThread()) _ASSERTE(!"OBJECTREF being accessed while thread is in preemptive GC mode."); } } #endif { // ValidateInner can throw or fault on failure which violates contract. CONTRACT_VIOLATION(ThrowsViolation | FaultViolation); // using inner helper because of TRY and stack objects with destructors. 
ValidateInner(bDeep, bVerifyNextHeader, bVerifySyncBlock); } } VOID Object::ValidateInner(BOOL bDeep, BOOL bVerifyNextHeader, BOOL bVerifySyncBlock) { STATIC_CONTRACT_THROWS; // See CONTRACT_VIOLATION above STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FAULT; // See CONTRACT_VIOLATION above STATIC_CONTRACT_MODE_COOPERATIVE; STATIC_CONTRACT_CANNOT_TAKE_LOCK; int lastTest = 0; EX_TRY { // in order to avoid contract violations in the EH code we'll allow AVs here, // they'll be handled in the catch block AVInRuntimeImplOkayHolder avOk; MethodTable *pMT = GetGCSafeMethodTable(); lastTest = 1; CHECK_AND_TEAR_DOWN(pMT && pMT->Validate()); lastTest = 2; bool noRangeChecks = (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_NO_RANGE_CHECKS) == EEConfig::HEAPVERIFY_NO_RANGE_CHECKS; // noRangeChecks depends on initial values being FALSE BOOL bSmallObjectHeapPtr = FALSE, bLargeObjectHeapPtr = FALSE; if (!noRangeChecks) { bSmallObjectHeapPtr = GCHeapUtilities::GetGCHeap()->IsHeapPointer(this, true); if (!bSmallObjectHeapPtr) bLargeObjectHeapPtr = GCHeapUtilities::GetGCHeap()->IsHeapPointer(this); CHECK_AND_TEAR_DOWN(bSmallObjectHeapPtr || bLargeObjectHeapPtr); } lastTest = 3; if (bDeep) { CHECK_AND_TEAR_DOWN(GetHeader()->Validate(bVerifySyncBlock)); } lastTest = 4; if (bDeep && (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC)) { GCHeapUtilities::GetGCHeap()->ValidateObjectMember(this); } lastTest = 5; // since bSmallObjectHeapPtr is initialized to FALSE // we skip checking noRangeChecks since if skipping // is enabled bSmallObjectHeapPtr will always be false. if (bSmallObjectHeapPtr) { CHECK_AND_TEAR_DOWN(!GCHeapUtilities::GetGCHeap()->IsLargeObject(this)); } lastTest = 6; lastTest = 7; _ASSERTE(GCHeapUtilities::IsGCHeapInitialized()); // try to validate next object's header if (bDeep && bVerifyNextHeader && GCHeapUtilities::GetGCHeap()->RuntimeStructuresValid() //NextObj could be very slow if concurrent GC is going on && !GCHeapUtilities::GetGCHeap ()->IsConcurrentGCInProgress ()) { Object * nextObj = GCHeapUtilities::GetGCHeap ()->NextObj (this); if ((nextObj != NULL) && (nextObj->GetGCSafeMethodTable() != g_pFreeObjectMethodTable)) { // we need a read barrier here - to make sure we read the object header _after_ // reading data that tells us that the object is eligible for verification // (also see: gc.cpp/a_fit_segment_end_p) VOLATILE_MEMORY_BARRIER(); CHECK_AND_TEAR_DOWN(nextObj->GetHeader()->Validate(FALSE)); } } lastTest = 8; #ifdef FEATURE_64BIT_ALIGNMENT if (pMT->RequiresAlign8()) { CHECK_AND_TEAR_DOWN((((size_t)this) & 0x7) == (pMT->IsValueType()? 4:0)); } lastTest = 9; #endif // FEATURE_64BIT_ALIGNMENT } EX_CATCH { STRESS_LOG3(LF_ASSERT, LL_ALWAYS, "Detected use of corrupted OBJECTREF: %p [MT=%p] (lastTest=%d)", this, lastTest > 0 ? (*(size_t*)this) : 0, lastTest); CHECK_AND_TEAR_DOWN(!"Detected use of a corrupted OBJECTREF. Possible GC hole."); } EX_END_CATCH(SwallowAllExceptions); } #endif // VERIFY_HEAP /*==================================NewString=================================== **Action: Creates a System.String object. 
**Returns: **Arguments: **Exceptions: ==============================================================================*/ STRINGREF StringObject::NewString(INT32 length) { CONTRACTL { GC_TRIGGERS; MODE_COOPERATIVE; PRECONDITION(length>=0); } CONTRACTL_END; STRINGREF pString; if (length<0) { return NULL; } else if (length == 0) { return GetEmptyString(); } else { pString = AllocateString(length); _ASSERTE(pString->GetBuffer()[length] == 0); return pString; } } /*==================================NewString=================================== **Action: Many years ago, VB didn't have the concept of a byte array, so enterprising ** users created one by allocating a BSTR with an odd length and using it to ** store bytes. A generation later, we're still stuck supporting this behavior. ** The way that we do this is to take advantage of the difference between the ** array length and the string length. The string length will always be the ** number of characters between the start of the string and the terminating 0. ** If we need an odd number of bytes, we'll take one wchar after the terminating 0. ** (e.g. at position StringLength+1). The high-order byte of this wchar is ** reserved for flags and the low-order byte is our odd byte. This function is ** used to allocate a string of that shape, but we don't actually mark the ** trailing byte as being in use yet. **Returns: A newly allocated string. Null if length is less than 0. **Arguments: length -- the length of the string to allocate ** bHasTrailByte -- whether the string also has a trailing byte. **Exceptions: OutOfMemoryException if AllocateString fails. ==============================================================================*/ STRINGREF StringObject::NewString(INT32 length, BOOL bHasTrailByte) { CONTRACTL { GC_TRIGGERS; MODE_COOPERATIVE; PRECONDITION(length>=0 && length != INT32_MAX); } CONTRACTL_END; STRINGREF pString; if (length<0 || length == INT32_MAX) { return NULL; } else if (length == 0) { return GetEmptyString(); } else { pString = AllocateString(length); _ASSERTE(pString->GetBuffer()[length]==0); if (bHasTrailByte) { _ASSERTE(pString->GetBuffer()[length+1]==0); } } return pString; } //======================================================================== // Creates a System.String object and initializes from // the supplied null-terminated C string. // // Maps NULL to null. This function does *not* return null to indicate // error situations: it throws an exception instead. //======================================================================== STRINGREF StringObject::NewString(const WCHAR *pwsz) { CONTRACTL { GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; if (!pwsz) { return NULL; } else { DWORD nch = (DWORD)wcslen(pwsz); if (nch==0) { return GetEmptyString(); } #if 0 // // This assert is disabled because it is valid for us to get a // pointer from the gc heap here as long as it is pinned. This // can happen when a string is marshalled to unmanaged by // pinning and then later put into a struct and that struct is // then marshalled to managed. 
// _ASSERTE(!GCHeapUtilities::GetGCHeap()->IsHeapPointer((BYTE *) pwsz) || !"pwsz can not point to GC Heap"); #endif // 0 STRINGREF pString = AllocateString( nch ); memcpyNoGCRefs(pString->GetBuffer(), pwsz, nch*sizeof(WCHAR)); _ASSERTE(pString->GetBuffer()[nch] == 0); return pString; } } #if defined(_MSC_VER) && defined(TARGET_X86) #pragma optimize("y", on) // Small critical routines, don't put in EBP frame #endif STRINGREF StringObject::NewString(const WCHAR *pwsz, int length) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; PRECONDITION(length>=0); } CONTRACTL_END; if (!pwsz) { return NULL; } else if (length <= 0) { return GetEmptyString(); } else { #if 0 // // This assert is disabled because it is valid for us to get a // pointer from the gc heap here as long as it is pinned. This // can happen when a string is marshalled to unmanaged by // pinning and then later put into a struct and that struct is // then marshalled to managed. // _ASSERTE(!GCHeapUtilities::GetGCHeap()->IsHeapPointer((BYTE *) pwsz) || !"pwsz can not point to GC Heap"); #endif // 0 STRINGREF pString = AllocateString(length); memcpyNoGCRefs(pString->GetBuffer(), pwsz, length*sizeof(WCHAR)); _ASSERTE(pString->GetBuffer()[length] == 0); return pString; } } #if defined(_MSC_VER) && defined(TARGET_X86) #pragma optimize("", on) // Go back to command line default optimizations #endif STRINGREF StringObject::NewString(LPCUTF8 psz) { CONTRACTL { GC_TRIGGERS; MODE_COOPERATIVE; THROWS; PRECONDITION(CheckPointer(psz)); } CONTRACTL_END; int length = (int)strlen(psz); if (length == 0) { return GetEmptyString(); } CQuickBytes qb; WCHAR* pwsz = (WCHAR*) qb.AllocThrows((length) * sizeof(WCHAR)); length = WszMultiByteToWideChar(CP_UTF8, 0, psz, length, pwsz, length); if (length == 0) { COMPlusThrow(kArgumentException, W("Arg_InvalidUTF8String")); } return NewString(pwsz, length); } STRINGREF StringObject::NewString(LPCUTF8 psz, int cBytes) { CONTRACTL { GC_TRIGGERS; MODE_COOPERATIVE; THROWS; PRECONDITION(CheckPointer(psz, NULL_OK)); } CONTRACTL_END; if (!psz) return NULL; _ASSERTE(psz); _ASSERTE(cBytes >= 0); if (cBytes == 0) { return GetEmptyString(); } int cWszBytes = 0; if (!ClrSafeInt<int>::multiply(cBytes, sizeof(WCHAR), cWszBytes)) COMPlusThrowOM(); CQuickBytes qb; WCHAR* pwsz = (WCHAR*) qb.AllocThrows(cWszBytes); int length = WszMultiByteToWideChar(CP_UTF8, 0, psz, cBytes, pwsz, cBytes); if (length == 0) { COMPlusThrow(kArgumentException, W("Arg_InvalidUTF8String")); } return NewString(pwsz, length); } // // // STATIC MEMBER VARIABLES // // STRINGREF* StringObject::EmptyStringRefPtr=NULL; //The special string helpers are used as flag bits for weird strings that have bytes //after the terminating 0. The only case where we use this right now is the VB BSTR as //byte array which is described in MakeStringAsByteArrayFromBytes. #define SPECIAL_STRING_VB_BYTE_ARRAY 0x100 FORCEINLINE BOOL MARKS_VB_BYTE_ARRAY(WCHAR x) { return static_cast<BOOL>(x & SPECIAL_STRING_VB_BYTE_ARRAY); } FORCEINLINE WCHAR MAKE_VB_TRAIL_BYTE(BYTE x) { return static_cast<WCHAR>(x) | SPECIAL_STRING_VB_BYTE_ARRAY; } FORCEINLINE BYTE GET_VB_TRAIL_BYTE(WCHAR x) { return static_cast<BYTE>(x & 0xFF); } /*==============================InitEmptyStringRefPtr============================ **Action: Gets an empty string refptr, cache the result. **Returns: The retrieved STRINGREF. 
==============================================================================*/ STRINGREF* StringObject::InitEmptyStringRefPtr() { CONTRACTL { THROWS; MODE_ANY; GC_TRIGGERS; } CONTRACTL_END; GCX_COOP(); EEStringData data(0, W(""), TRUE); EmptyStringRefPtr = SystemDomain::System()->DefaultDomain()->GetLoaderAllocator()->GetStringObjRefPtrFromUnicodeString(&data); return EmptyStringRefPtr; } // strAChars must be null-terminated, with an appropriate aLength // strBChars must be null-terminated, with an appropriate bLength OR bLength == -1 // If bLength == -1, we stop on the first null character in strBChars BOOL StringObject::CaseInsensitiveCompHelper(_In_reads_(aLength) WCHAR *strAChars, _In_z_ INT8 *strBChars, INT32 aLength, INT32 bLength, INT32 *result) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; PRECONDITION(CheckPointer(strAChars)); PRECONDITION(CheckPointer(strBChars)); PRECONDITION(CheckPointer(result)); } CONTRACTL_END; WCHAR *strAStart = strAChars; INT8 *strBStart = strBChars; unsigned charA; unsigned charB; for(;;) { charA = *strAChars; charB = (unsigned) *strBChars; //Case-insensitive comparison on chars greater than 0x7F //requires a locale-aware casing operation and we're not going there. if ((charA|charB)>0x7F) { *result = 0; return FALSE; } // uppercase both chars. if (charA>='a' && charA<='z') { charA ^= 0x20; } if (charB>='a' && charB<='z') { charB ^= 0x20; } //Return the (case-insensitive) difference between them. if (charA!=charB) { *result = (int)(charA-charB); return TRUE; } if (charA==0) // both strings have null character { if (bLength == -1) { *result = aLength - static_cast<INT32>(strAChars - strAStart); return TRUE; } if (strAChars==strAStart + aLength || strBChars==strBStart + bLength) { *result = aLength - bLength; return TRUE; } // else both embedded zeros } // Next char strAChars++; strBChars++; } } /*============================InternalTrailByteCheck============================ **Action: Many years ago, VB didn't have the concept of a byte array, so enterprising ** users created one by allocating a BSTR with an odd length and using it to ** store bytes. A generation later, we're still stuck supporting this behavior. ** The way that we do this is stick the trail byte in the sync block ** whenever we encounter such a situation. Since we expect this to be a very corner case ** accessing the sync block seems like a good enough solution ** **Returns: True if <CODE>str</CODE> contains a VB trail byte, false otherwise. **Arguments: str -- The string to be examined. **Exceptions: None ==============================================================================*/ BOOL StringObject::HasTrailByte() { WRAPPER_NO_CONTRACT; SyncBlock * pSyncBlock = PassiveGetSyncBlock(); if(pSyncBlock != NULL) { return pSyncBlock->HasCOMBstrTrailByte(); } return FALSE; } /*=================================GetTrailByte================================= **Action: If <CODE>str</CODE> contains a vb trail byte, returns a copy of it. **Returns: True if <CODE>str</CODE> contains a trail byte. *bTrailByte is set to ** the byte in question if <CODE>str</CODE> does have a trail byte, otherwise ** it's set to 0. **Arguments: str -- The string being examined. ** bTrailByte -- An out param to hold the value of the trail byte. **Exceptions: None. 
==============================================================================*/ BOOL StringObject::GetTrailByte(BYTE *bTrailByte) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; _ASSERTE(bTrailByte); *bTrailByte=0; BOOL retValue = HasTrailByte(); if(retValue) { *bTrailByte = GET_VB_TRAIL_BYTE(GetHeader()->PassiveGetSyncBlock()->GetCOMBstrTrailByte()); } return retValue; } /*=================================SetTrailByte================================= **Action: Sets the trail byte in the sync block **Returns: True. **Arguments: str -- The string into which to set the trail byte. ** bTrailByte -- The trail byte to be added to the string. **Exceptions: None. ==============================================================================*/ BOOL StringObject::SetTrailByte(BYTE bTrailByte) { WRAPPER_NO_CONTRACT; GetHeader()->GetSyncBlock()->SetCOMBstrTrailByte(MAKE_VB_TRAIL_BYTE(bTrailByte)); return TRUE; } #ifdef USE_CHECKED_OBJECTREFS //------------------------------------------------------------- // Default constructor, for non-initializing declarations: // // OBJECTREF or; //------------------------------------------------------------- OBJECTREF::OBJECTREF() { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; m_asObj = (Object*)POISONC; Thread::ObjectRefNew(this); } //------------------------------------------------------------- // Copy constructor, for passing OBJECTREF's as function arguments. //------------------------------------------------------------- OBJECTREF::OBJECTREF(const OBJECTREF & objref) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_MODE_COOPERATIVE; STATIC_CONTRACT_FORBID_FAULT; VALIDATEOBJECT(objref.m_asObj); // !!! If this assert is fired, there are two possibilities: // !!! 1. You are doing a type cast, e.g. *(OBJECTREF*)pObj // !!! Instead, you should use ObjectToOBJECTREF(*(Object**)pObj), // !!! or ObjectToSTRINGREF(*(StringObject**)pObj) // !!! 2. There is a real GC hole here. // !!! Either way you need to fix the code. _ASSERTE(Thread::IsObjRefValid(&objref)); if ((objref.m_asObj != 0) && ((IGCHeap*)GCHeapUtilities::GetGCHeap())->IsHeapPointer( (BYTE*)this )) { _ASSERTE(!"Write Barrier violation. Must use SetObjectReference() to assign OBJECTREF's into the GC heap!"); } m_asObj = objref.m_asObj; if (m_asObj != 0) { ENABLESTRESSHEAP(); } Thread::ObjectRefNew(this); } //------------------------------------------------------------- // VolatileLoadWithoutBarrier constructor //------------------------------------------------------------- OBJECTREF::OBJECTREF(const OBJECTREF *pObjref, tagVolatileLoadWithoutBarrier tag) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_MODE_COOPERATIVE; STATIC_CONTRACT_FORBID_FAULT; Object* objrefAsObj = VolatileLoadWithoutBarrier(&pObjref->m_asObj); VALIDATEOBJECT(objrefAsObj); // !!! If this assert is fired, there are two possibilities: // !!! 1. You are doing a type cast, e.g. *(OBJECTREF*)pObj // !!! Instead, you should use ObjectToOBJECTREF(*(Object**)pObj), // !!! or ObjectToSTRINGREF(*(StringObject**)pObj) // !!! 2. There is a real GC hole here. // !!! Either way you need to fix the code. _ASSERTE(Thread::IsObjRefValid(pObjref)); if ((objrefAsObj != 0) && ((IGCHeap*)GCHeapUtilities::GetGCHeap())->IsHeapPointer( (BYTE*)this )) { _ASSERTE(!"Write Barrier violation. 
Must use SetObjectReference() to assign OBJECTREF's into the GC heap!"); } m_asObj = objrefAsObj; if (m_asObj != 0) { ENABLESTRESSHEAP(); } Thread::ObjectRefNew(this); } //------------------------------------------------------------- // To allow NULL to be used as an OBJECTREF. //------------------------------------------------------------- OBJECTREF::OBJECTREF(TADDR nul) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; //_ASSERTE(nul == 0); m_asObj = (Object*)nul; if( m_asObj != NULL) { // REVISIT_TODO: fix this, why is this constructor being used for non-null object refs? STATIC_CONTRACT_VIOLATION(ModeViolation); VALIDATEOBJECT(m_asObj); ENABLESTRESSHEAP(); } Thread::ObjectRefNew(this); } //------------------------------------------------------------- // This is for the GC's use only. Non-GC code should never // use the "Object" class directly. The unused "int" argument // prevents C++ from using this to implicitly convert Object*'s // to OBJECTREF. //------------------------------------------------------------- OBJECTREF::OBJECTREF(Object *pObject) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_MODE_COOPERATIVE; STATIC_CONTRACT_FORBID_FAULT; DEBUG_ONLY_FUNCTION; if ((pObject != 0) && ((IGCHeap*)GCHeapUtilities::GetGCHeap())->IsHeapPointer( (BYTE*)this )) { _ASSERTE(!"Write Barrier violation. Must use SetObjectReference() to assign OBJECTREF's into the GC heap!"); } m_asObj = pObject; VALIDATEOBJECT(m_asObj); if (m_asObj != 0) { ENABLESTRESSHEAP(); } Thread::ObjectRefNew(this); } void OBJECTREF::Validate(BOOL bDeep, BOOL bVerifyNextHeader, BOOL bVerifySyncBlock) { LIMITED_METHOD_CONTRACT; if (m_asObj) { m_asObj->Validate(bDeep, bVerifyNextHeader, bVerifySyncBlock); } } //------------------------------------------------------------- // Test against NULL. //------------------------------------------------------------- int OBJECTREF::operator!() const { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; // We don't do any validation here, as we want to allow zero comparison in preemptive mode return !m_asObj; } //------------------------------------------------------------- // Compare two OBJECTREF's. //------------------------------------------------------------- int OBJECTREF::operator==(const OBJECTREF &objref) const { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; if (objref.m_asObj != NULL) // Allow comparison to zero in preemptive mode { // REVISIT_TODO: Weakening the contract system a little bit here. We should really // add a special NULLOBJECTREF which can be used for these situations and have // a seperate code path for that with the correct contract protections. STATIC_CONTRACT_VIOLATION(ModeViolation); VALIDATEOBJECT(objref.m_asObj); // !!! If this assert is fired, there are two possibilities: // !!! 1. You are doing a type cast, e.g. *(OBJECTREF*)pObj // !!! Instead, you should use ObjectToOBJECTREF(*(Object**)pObj), // !!! or ObjectToSTRINGREF(*(StringObject**)pObj) // !!! 2. There is a real GC hole here. // !!! Either way you need to fix the code. _ASSERTE(Thread::IsObjRefValid(&objref)); VALIDATEOBJECT(m_asObj); // If this assert fires, you probably did not protect // your OBJECTREF and a GC might have occurred. 
To // where the possible GC was, set a breakpoint in Thread::TriggersGC _ASSERTE(Thread::IsObjRefValid(this)); if (m_asObj != 0 || objref.m_asObj != 0) { ENABLESTRESSHEAP(); } } return m_asObj == objref.m_asObj; } //------------------------------------------------------------- // Compare two OBJECTREF's. //------------------------------------------------------------- int OBJECTREF::operator!=(const OBJECTREF &objref) const { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; if (objref.m_asObj != NULL) // Allow comparison to zero in preemptive mode { // REVISIT_TODO: Weakening the contract system a little bit here. We should really // add a special NULLOBJECTREF which can be used for these situations and have // a seperate code path for that with the correct contract protections. STATIC_CONTRACT_VIOLATION(ModeViolation); VALIDATEOBJECT(objref.m_asObj); // !!! If this assert is fired, there are two possibilities: // !!! 1. You are doing a type cast, e.g. *(OBJECTREF*)pObj // !!! Instead, you should use ObjectToOBJECTREF(*(Object**)pObj), // !!! or ObjectToSTRINGREF(*(StringObject**)pObj) // !!! 2. There is a real GC hole here. // !!! Either way you need to fix the code. _ASSERTE(Thread::IsObjRefValid(&objref)); VALIDATEOBJECT(m_asObj); // If this assert fires, you probably did not protect // your OBJECTREF and a GC might have occurred. To // where the possible GC was, set a breakpoint in Thread::TriggersGC _ASSERTE(Thread::IsObjRefValid(this)); if (m_asObj != 0 || objref.m_asObj != 0) { ENABLESTRESSHEAP(); } } return m_asObj != objref.m_asObj; } //------------------------------------------------------------- // Forward method calls. //------------------------------------------------------------- Object* OBJECTREF::operator->() { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; VALIDATEOBJECT(m_asObj); // If this assert fires, you probably did not protect // your OBJECTREF and a GC might have occurred. To // where the possible GC was, set a breakpoint in Thread::TriggersGC _ASSERTE(Thread::IsObjRefValid(this)); if (m_asObj != 0) { ENABLESTRESSHEAP(); } // if you are using OBJECTREF directly, // you probably want an Object * return (Object *)m_asObj; } //------------------------------------------------------------- // Forward method calls. //------------------------------------------------------------- const Object* OBJECTREF::operator->() const { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; VALIDATEOBJECT(m_asObj); // If this assert fires, you probably did not protect // your OBJECTREF and a GC might have occurred. To // where the possible GC was, set a breakpoint in Thread::TriggersGC _ASSERTE(Thread::IsObjRefValid(this)); if (m_asObj != 0) { ENABLESTRESSHEAP(); } // if you are using OBJECTREF directly, // you probably want an Object * return (Object *)m_asObj; } //------------------------------------------------------------- // Assignment. We don't validate the destination so as not // to break the sequence: // // OBJECTREF or; // or = ...; //------------------------------------------------------------- OBJECTREF& OBJECTREF::operator=(const OBJECTREF &objref) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; VALIDATEOBJECT(objref.m_asObj); // !!! If this assert is fired, there are two possibilities: // !!! 1. You are doing a type cast, e.g. *(OBJECTREF*)pObj // !!! Instead, you should use ObjectToOBJECTREF(*(Object**)pObj), // !!! 
or ObjectToSTRINGREF(*(StringObject**)pObj) // !!! 2. There is a real GC hole here. // !!! Either way you need to fix the code. _ASSERTE(Thread::IsObjRefValid(&objref)); if ((objref.m_asObj != 0) && ((IGCHeap*)GCHeapUtilities::GetGCHeap())->IsHeapPointer( (BYTE*)this )) { _ASSERTE(!"Write Barrier violation. Must use SetObjectReference() to assign OBJECTREF's into the GC heap!"); } Thread::ObjectRefAssign(this); m_asObj = objref.m_asObj; if (m_asObj != 0) { ENABLESTRESSHEAP(); } return *this; } //------------------------------------------------------------- // Allows for the assignment of NULL to a OBJECTREF //------------------------------------------------------------- OBJECTREF& OBJECTREF::operator=(TADDR nul) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; _ASSERTE(nul == 0); Thread::ObjectRefAssign(this); m_asObj = (Object*)nul; if (m_asObj != 0) { ENABLESTRESSHEAP(); } return *this; } #endif // DEBUG #ifdef _DEBUG void* __cdecl GCSafeMemCpy(void * dest, const void * src, size_t len) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; if (!(((*(BYTE**)&dest) < g_lowest_address ) || ((*(BYTE**)&dest) >= g_highest_address))) { Thread* pThread = GetThreadNULLOk(); // GCHeapUtilities::IsHeapPointer has race when called in preemptive mode. It walks the list of segments // that can be modified by GC. Do the check below only if it is safe to do so. if (pThread != NULL && pThread->PreemptiveGCDisabled()) { // Note there is memcpyNoGCRefs which will allow you to do a memcpy into the GC // heap if you really know you don't need to call the write barrier _ASSERTE(!GCHeapUtilities::GetGCHeap()->IsHeapPointer((BYTE *) dest) || !"using memcpy to copy into the GC heap, use CopyValueClass"); } } return memcpyNoGCRefs(dest, src, len); } #endif // _DEBUG // This function clears a piece of memory in a GC safe way. It makes the guarantee // that it will clear memory in at least pointer sized chunks whenever possible. // Unaligned memory at the beginning and remaining bytes at the end are written bytewise. // We must make this guarantee whenever we clear memory in the GC heap that could contain // object references. The GC or other user threads can read object references at any time, // clearing them bytewise can result in a read on another thread getting incorrect data. 
void __fastcall ZeroMemoryInGCHeap(void* mem, size_t size) { WRAPPER_NO_CONTRACT; BYTE* memBytes = (BYTE*) mem; BYTE* endBytes = &memBytes[size]; // handle unaligned bytes at the beginning while (!IS_ALIGNED(memBytes, sizeof(PTR_PTR_VOID)) && memBytes < endBytes) *memBytes++ = 0; // now write pointer sized pieces // volatile ensures that this doesn't get optimized back into a memset call size_t nPtrs = (endBytes - memBytes) / sizeof(PTR_PTR_VOID); PTR_VOID volatile * memPtr = (PTR_PTR_VOID) memBytes; for (size_t i = 0; i < nPtrs; i++) *memPtr++ = 0; // handle remaining bytes at the end memBytes = (BYTE*) memPtr; while (memBytes < endBytes) *memBytes++ = 0; } void StackTraceArray::Append(StackTraceElement const * begin, StackTraceElement const * end) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; PRECONDITION(IsProtectedByGCFrame((OBJECTREF*)this)); } CONTRACTL_END; // ensure that only one thread can write to the array EnsureThreadAffinity(); size_t newsize = Size() + (end - begin); Grow(newsize); memcpyNoGCRefs(GetData() + Size(), begin, (end - begin) * sizeof(StackTraceElement)); MemoryBarrier(); // prevent the newsize from being reordered with the array copy SetSize(newsize); #if defined(_DEBUG) CheckState(); #endif } void StackTraceArray::CheckState() const { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_COOPERATIVE; } CONTRACTL_END; if (!m_array) return; assert(GetObjectThread() == GetThreadNULLOk()); size_t size = Size(); StackTraceElement const * p; p = GetData(); for (size_t i = 0; i < size; ++i) assert(p[i].pFunc != NULL); } void StackTraceArray::Grow(size_t grow_size) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; INJECT_FAULT(ThrowOutOfMemory();); PRECONDITION(IsProtectedByGCFrame((OBJECTREF*)this)); } CONTRACTL_END; size_t raw_size = grow_size * sizeof(StackTraceElement) + sizeof(ArrayHeader); if (!m_array) { SetArray(I1ARRAYREF(AllocatePrimitiveArray(ELEMENT_TYPE_I1, static_cast<DWORD>(raw_size)))); SetSize(0); SetObjectThread(); } else { if (Capacity() >= raw_size) return; // allocate a new array, copy the data size_t new_capacity = Max(Capacity() * 2, raw_size); _ASSERTE(new_capacity >= grow_size * sizeof(StackTraceElement) + sizeof(ArrayHeader)); I1ARRAYREF newarr = (I1ARRAYREF) AllocatePrimitiveArray(ELEMENT_TYPE_I1, static_cast<DWORD>(new_capacity)); memcpyNoGCRefs(newarr->GetDirectPointerToNonObjectElements(), GetRaw(), Size() * sizeof(StackTraceElement) + sizeof(ArrayHeader)); SetArray(newarr); } } void StackTraceArray::EnsureThreadAffinity() { WRAPPER_NO_CONTRACT; if (!m_array) return; if (GetObjectThread() != GetThreadNULLOk()) { // object is being changed by a thread different from the one which created it // make a copy of the array to prevent a race condition when two different threads try to change it StackTraceArray copy; GCPROTECT_BEGIN(copy); copy.CopyFrom(*this); this->Swap(copy); GCPROTECT_END(); } } // Deep copies the stack trace array void StackTraceArray::CopyFrom(StackTraceArray const & src) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; INJECT_FAULT(ThrowOutOfMemory();); PRECONDITION(IsProtectedByGCFrame((OBJECTREF*)this)); PRECONDITION(IsProtectedByGCFrame((OBJECTREF*)&src)); } CONTRACTL_END; m_array = (I1ARRAYREF) AllocatePrimitiveArray(ELEMENT_TYPE_I1, static_cast<DWORD>(src.Capacity())); Volatile<size_t> size = src.Size(); memcpyNoGCRefs(GetRaw(), src.GetRaw(), size * sizeof(StackTraceElement) + sizeof(ArrayHeader)); SetSize(size); // set size to the exact value which was used when we copied the data // another thread might have changed it 
at the time of copying SetObjectThread(); // affinitize the newly created array with the current thread } #ifdef _DEBUG //=============================================================================== // Code that insures that our unmanaged version of Nullable is consistant with // the managed version Nullable<T> for all T. void Nullable::CheckFieldOffsets(TypeHandle nullableType) { LIMITED_METHOD_CONTRACT; /*** // The non-instantiated method tables like List<T> that are used // by reflection and verification do not have correct field offsets // but we never make instances of these anyway. if (nullableMT->ContainsGenericVariables()) return; ***/ MethodTable* nullableMT = nullableType.GetMethodTable(); // insure that the managed version of the table is the same as the // unmanaged. Note that we can't do this in corelib.h because this // class is generic and field layout depends on the instantiation. _ASSERTE(nullableMT->GetNumInstanceFields() == 2); FieldDesc* field = nullableMT->GetApproxFieldDescListRaw(); _ASSERTE(strcmp(field->GetDebugName(), "hasValue") == 0); // _ASSERTE(field->GetOffset() == offsetof(Nullable, hasValue)); field++; _ASSERTE(strcmp(field->GetDebugName(), "value") == 0); // _ASSERTE(field->GetOffset() == offsetof(Nullable, value)); } #endif //=============================================================================== // Returns true if nullableMT is Nullable<T> for T is equivalent to paramMT BOOL Nullable::IsNullableForTypeHelper(MethodTable* nullableMT, MethodTable* paramMT) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_ANY; } CONTRACTL_END; if (!nullableMT->IsNullable()) return FALSE; // we require the parameter types to be equivalent return TypeHandle(paramMT).IsEquivalentTo(nullableMT->GetInstantiation()[0]); } //=============================================================================== // Returns true if nullableMT is Nullable<T> for T == paramMT BOOL Nullable::IsNullableForTypeHelperNoGC(MethodTable* nullableMT, MethodTable* paramMT) { LIMITED_METHOD_CONTRACT; if (!nullableMT->IsNullable()) return FALSE; // we require an exact match of the parameter types return TypeHandle(paramMT) == nullableMT->GetInstantiation()[0]; } //=============================================================================== CLR_BOOL* Nullable::HasValueAddr(MethodTable* nullableMT) { LIMITED_METHOD_CONTRACT; _ASSERTE(strcmp(nullableMT->GetApproxFieldDescListRaw()[0].GetDebugName(), "hasValue") == 0); _ASSERTE(nullableMT->GetApproxFieldDescListRaw()[0].GetOffset() == 0); return (CLR_BOOL*) this; } //=============================================================================== void* Nullable::ValueAddr(MethodTable* nullableMT) { LIMITED_METHOD_CONTRACT; _ASSERTE(strcmp(nullableMT->GetApproxFieldDescListRaw()[1].GetDebugName(), "value") == 0); return (((BYTE*) this) + nullableMT->GetApproxFieldDescListRaw()[1].GetOffset()); } //=============================================================================== // Special Logic to box a nullable<T> as a boxed<T> OBJECTREF Nullable::Box(void* srcPtr, MethodTable* nullableMT) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; FAULT_NOT_FATAL(); // FIX_NOW: why do we need this? 
Nullable* src = (Nullable*) srcPtr; _ASSERTE(IsNullableType(nullableMT)); // We better have a concrete instantiation, or our field offset asserts are not useful _ASSERTE(!nullableMT->ContainsGenericVariables()); if (!*src->HasValueAddr(nullableMT)) return NULL; OBJECTREF obj = 0; GCPROTECT_BEGININTERIOR (src); MethodTable* argMT = nullableMT->GetInstantiation()[0].AsMethodTable(); obj = argMT->Allocate(); CopyValueClass(obj->UnBox(), src->ValueAddr(nullableMT), argMT); GCPROTECT_END (); return obj; } //=============================================================================== // Special Logic to unbox a boxed T as a nullable<T> BOOL Nullable::UnBox(void* destPtr, OBJECTREF boxedVal, MethodTable* destMT) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; Nullable* dest = (Nullable*) destPtr; BOOL fRet = TRUE; // We should only get here if we are unboxing a T as a Nullable<T> _ASSERTE(IsNullableType(destMT)); // We better have a concrete instantiation, or our field offset asserts are not useful _ASSERTE(!destMT->ContainsGenericVariables()); if (boxedVal == NULL) { // Logically we are doing *dest->HasValueAddr(destMT) = false; // We zero out the whole structure becasue it may contain GC references // and these need to be initialized to zero. (could optimize in the non-GC case) InitValueClass(destPtr, destMT); fRet = TRUE; } else { GCPROTECT_BEGIN(boxedVal); if (!IsNullableForType(destMT, boxedVal->GetMethodTable())) { // For safety's sake, also allow true nullables to be unboxed normally. // This should not happen normally, but we want to be robust if (destMT->IsEquivalentTo(boxedVal->GetMethodTable())) { CopyValueClass(dest, boxedVal->GetData(), destMT); fRet = TRUE; } else { fRet = FALSE; } } else { *dest->HasValueAddr(destMT) = true; CopyValueClass(dest->ValueAddr(destMT), boxedVal->UnBox(), boxedVal->GetMethodTable()); fRet = TRUE; } GCPROTECT_END(); } return fRet; } //=============================================================================== // Special Logic to unbox a boxed T as a nullable<T> // Does not handle type equivalence (may conservatively return FALSE) BOOL Nullable::UnBoxNoGC(void* destPtr, OBJECTREF boxedVal, MethodTable* destMT) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_COOPERATIVE; } CONTRACTL_END; Nullable* dest = (Nullable*) destPtr; // We should only get here if we are unboxing a T as a Nullable<T> _ASSERTE(IsNullableType(destMT)); // We better have a concrete instantiation, or our field offset asserts are not useful _ASSERTE(!destMT->ContainsGenericVariables()); if (boxedVal == NULL) { // Logically we are doing *dest->HasValueAddr(destMT) = false; // We zero out the whole structure becasue it may contain GC references // and these need to be initialized to zero. (could optimize in the non-GC case) InitValueClass(destPtr, destMT); } else { if (!IsNullableForTypeNoGC(destMT, boxedVal->GetMethodTable())) { // For safety's sake, also allow true nullables to be unboxed normally. // This should not happen normally, but we want to be robust if (destMT == boxedVal->GetMethodTable()) { CopyValueClass(dest, boxedVal->GetData(), destMT); return TRUE; } return FALSE; } *dest->HasValueAddr(destMT) = true; CopyValueClass(dest->ValueAddr(destMT), boxedVal->UnBox(), boxedVal->GetMethodTable()); } return TRUE; } //=============================================================================== // Special Logic to unbox a boxed T as a nullable<T> into an argument // specified by the argDest. 
// Does not handle type equivalence (may conservatively return FALSE) BOOL Nullable::UnBoxIntoArgNoGC(ArgDestination *argDest, OBJECTREF boxedVal, MethodTable* destMT) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_COOPERATIVE; } CONTRACTL_END; #if defined(UNIX_AMD64_ABI) if (argDest->IsStructPassedInRegs()) { // We should only get here if we are unboxing a T as a Nullable<T> _ASSERTE(IsNullableType(destMT)); // We better have a concrete instantiation, or our field offset asserts are not useful _ASSERTE(!destMT->ContainsGenericVariables()); if (boxedVal == NULL) { // Logically we are doing *dest->HasValueAddr(destMT) = false; // We zero out the whole structure becasue it may contain GC references // and these need to be initialized to zero. (could optimize in the non-GC case) InitValueClassArg(argDest, destMT); } else { if (!IsNullableForTypeNoGC(destMT, boxedVal->GetMethodTable())) { // For safety's sake, also allow true nullables to be unboxed normally. // This should not happen normally, but we want to be robust if (destMT == boxedVal->GetMethodTable()) { CopyValueClassArg(argDest, boxedVal->GetData(), destMT, 0); return TRUE; } return FALSE; } Nullable* dest = (Nullable*)argDest->GetStructGenRegDestinationAddress(); *dest->HasValueAddr(destMT) = true; int destOffset = (BYTE*)dest->ValueAddr(destMT) - (BYTE*)dest; CopyValueClassArg(argDest, boxedVal->UnBox(), boxedVal->GetMethodTable(), destOffset); } return TRUE; } #endif // UNIX_AMD64_ABI return UnBoxNoGC(argDest->GetDestinationAddress(), boxedVal, destMT); } //=============================================================================== // Special Logic to unbox a boxed T as a nullable<T> // Does not do any type checks. void Nullable::UnBoxNoCheck(void* destPtr, OBJECTREF boxedVal, MethodTable* destMT) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_COOPERATIVE; } CONTRACTL_END; Nullable* dest = (Nullable*) destPtr; // We should only get here if we are unboxing a T as a Nullable<T> _ASSERTE(IsNullableType(destMT)); // We better have a concrete instantiation, or our field offset asserts are not useful _ASSERTE(!destMT->ContainsGenericVariables()); if (boxedVal == NULL) { // Logically we are doing *dest->HasValueAddr(destMT) = false; // We zero out the whole structure becasue it may contain GC references // and these need to be initialized to zero. (could optimize in the non-GC case) InitValueClass(destPtr, destMT); } else { if (IsNullableType(boxedVal->GetMethodTable())) { // For safety's sake, also allow true nullables to be unboxed normally. // This should not happen normally, but we want to be robust CopyValueClass(dest, boxedVal->GetData(), destMT); } *dest->HasValueAddr(destMT) = true; CopyValueClass(dest->ValueAddr(destMT), boxedVal->UnBox(), boxedVal->GetMethodTable()); } } //=============================================================================== // a boxed Nullable<T> should either be null or a boxed T, but sometimes it is // useful to have a 'true' boxed Nullable<T> (that is it has two fields). This // function returns a 'normalized' version of this pointer. 
OBJECTREF Nullable::NormalizeBox(OBJECTREF obj) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; if (obj != NULL) { MethodTable* retMT = obj->GetMethodTable(); if (Nullable::IsNullableType(retMT)) obj = Nullable::Box(obj->GetData(), retMT); } return obj; } void ThreadBaseObject::SetInternal(Thread *it) { WRAPPER_NO_CONTRACT; // only allow a transition from NULL to non-NULL _ASSERTE((m_InternalThread == NULL) && (it != NULL)); m_InternalThread = it; // Now the native Thread will only be destroyed after the managed Thread is collected. // Tell the GC that the managed Thread actually represents much more memory. GCInterface::AddMemoryPressure(sizeof(Thread)); } void ThreadBaseObject::ClearInternal() { WRAPPER_NO_CONTRACT; _ASSERTE(m_InternalThread != NULL); m_InternalThread = NULL; GCInterface::RemoveMemoryPressure(sizeof(Thread)); } #endif // #ifndef DACCESS_COMPILE StackTraceElement const & StackTraceArray::operator[](size_t index) const { WRAPPER_NO_CONTRACT; return GetData()[index]; } StackTraceElement & StackTraceArray::operator[](size_t index) { WRAPPER_NO_CONTRACT; return GetData()[index]; } #if !defined(DACCESS_COMPILE) // Define the lock used to access stacktrace from an exception object SpinLock g_StackTraceArrayLock; void ExceptionObject::SetStackTrace(I1ARRAYREF stackTrace, PTRARRAYREF dynamicMethodArray) { CONTRACTL { GC_NOTRIGGER; NOTHROW; MODE_COOPERATIVE; } CONTRACTL_END; #ifdef STRESS_LOG if (StressLog::StressLogOn(~0u, 0)) { StressLog::CreateThreadStressLog(); } #endif SpinLock::AcquireLock(&g_StackTraceArrayLock); SetObjectReference((OBJECTREF*)&_stackTrace, (OBJECTREF)stackTrace); SetObjectReference((OBJECTREF*)&_dynamicMethods, (OBJECTREF)dynamicMethodArray); SpinLock::ReleaseLock(&g_StackTraceArrayLock); } #endif // !defined(DACCESS_COMPILE) void ExceptionObject::GetStackTrace(StackTraceArray & stackTrace, PTRARRAYREF * outDynamicMethodArray /*= NULL*/) const { CONTRACTL { GC_NOTRIGGER; NOTHROW; MODE_COOPERATIVE; } CONTRACTL_END; #if !defined(DACCESS_COMPILE) SpinLock::AcquireLock(&g_StackTraceArrayLock); #endif // !defined(DACCESS_COMPILE) StackTraceArray temp(_stackTrace); stackTrace.Swap(temp); if (outDynamicMethodArray != NULL) { *outDynamicMethodArray = _dynamicMethods; } #if !defined(DACCESS_COMPILE) SpinLock::ReleaseLock(&g_StackTraceArrayLock); #endif // !defined(DACCESS_COMPILE) } bool LAHashDependentHashTrackerObject::IsLoaderAllocatorLive() { return (ObjectFromHandle(_dependentHandle) != NULL); } void LAHashDependentHashTrackerObject::GetDependentAndLoaderAllocator(OBJECTREF *pLoaderAllocatorRef, GCHEAPHASHOBJECTREF *pGCHeapHash) { OBJECTREF primary = ObjectFromHandle(_dependentHandle); if (pLoaderAllocatorRef != NULL) *pLoaderAllocatorRef = primary; IGCHandleManager *mgr = GCHandleUtilities::GetGCHandleManager(); // Secondary is tracked only if primary is non-null if (pGCHeapHash != NULL) *pGCHeapHash = (GCHEAPHASHOBJECTREF)(OBJECTREF)((primary != NULL) ? mgr->GetDependentHandleSecondary(_dependentHandle) : NULL); }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // OBJECT.CPP // // Definitions of a Com+ Object // #include "common.h" #include "vars.hpp" #include "class.h" #include "object.h" #include "threads.h" #include "excep.h" #include "eeconfig.h" #include "gcheaputilities.h" #include "field.h" #include "argdestination.h" SVAL_IMPL(INT32, ArrayBase, s_arrayBoundsZero); // follow the necessary rules to get a new valid hashcode for an object DWORD Object::ComputeHashCode() { DWORD hashCode; // note that this algorithm now uses at most HASHCODE_BITS so that it will // fit into the objheader if the hashcode has to be moved back into the objheader // such as for an object that is being frozen do { // we use the high order bits in this case because they're more random hashCode = GetThread()->GetNewHashCode() >> (32-HASHCODE_BITS); } while (hashCode == 0); // need to enforce hashCode != 0 // verify that it really fits into HASHCODE_BITS _ASSERTE((hashCode & ((1<<HASHCODE_BITS)-1)) == hashCode); return hashCode; } #ifndef DACCESS_COMPILE INT32 Object::GetHashCodeEx() { CONTRACTL { MODE_COOPERATIVE; THROWS; GC_NOTRIGGER; } CONTRACTL_END // This loop exists because we're inspecting the header dword of the object // and it may change under us because of races with other threads. // On top of that, it may have the spin lock bit set, in which case we're // not supposed to change it. // In all of these case, we need to retry the operation. DWORD iter = 0; DWORD dwSwitchCount = 0; while (true) { DWORD bits = GetHeader()->GetBits(); if (bits & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX) { if (bits & BIT_SBLK_IS_HASHCODE) { // Common case: the object already has a hash code return bits & MASK_HASHCODE; } else { // We have a sync block index. This means if we already have a hash code, // it is in the sync block, otherwise we generate a new one and store it there SyncBlock *psb = GetSyncBlock(); DWORD hashCode = psb->GetHashCode(); if (hashCode != 0) return hashCode; hashCode = ComputeHashCode(); return psb->SetHashCode(hashCode); } } else { // If a thread is holding the thin lock we need a syncblock if ((bits & (SBLK_MASK_LOCK_THREADID)) != 0) { GetSyncBlock(); // No need to replicate the above code dealing with sync blocks // here - in the next iteration of the loop, we'll realize // we have a syncblock, and we'll do the right thing. } else { // We want to change the header in this case, so we have to check the BIT_SBLK_SPIN_LOCK bit first if (bits & BIT_SBLK_SPIN_LOCK) { iter++; if ((iter % 1024) != 0 && g_SystemInfo.dwNumberOfProcessors > 1) { YieldProcessorNormalized(); // indicate to the processor that we are spinning } else { __SwitchToThread(0, ++dwSwitchCount); } continue; } DWORD hashCode = ComputeHashCode(); DWORD newBits = bits | BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | BIT_SBLK_IS_HASHCODE | hashCode; if (GetHeader()->SetBits(newBits, bits) == bits) return hashCode; // Header changed under us - let's restart this whole thing. } } } } #endif // #ifndef DACCESS_COMPILE BOOL Object::ValidateObjectWithPossibleAV() { CANNOT_HAVE_CONTRACT; SUPPORTS_DAC; return GetGCSafeMethodTable()->ValidateWithPossibleAV(); } #ifndef DACCESS_COMPILE // There are cases where it is not possible to get a type handle during a GC. // If we can get the type handle, this method will return it. // Otherwise, the method will return NULL. 
TypeHandle Object::GetGCSafeTypeHandleIfPossible() const { CONTRACTL { NOTHROW; GC_NOTRIGGER; if(!IsGCThread()) { MODE_COOPERATIVE; } } CONTRACTL_END; // Although getting the type handle is unsafe and could cause recursive type lookups // in some cases, it's always safe and straightforward to get to the MethodTable. MethodTable * pMT = GetGCSafeMethodTable(); _ASSERTE(pMT != NULL); if (pMT == g_pFreeObjectMethodTable) { return NULL; } // Don't look at types that belong to an unloading AppDomain, or else // pObj->GetGCSafeTypeHandle() can AV. For example, we encountered this AV when pObj // was an array like this: // // MyValueType1<MyValueType2>[] myArray // // where MyValueType1<T> & MyValueType2 are defined in different assemblies. In such // a case, looking up the type handle for myArray requires looking in // MyValueType1<T>'s module's m_AssemblyRefByNameTable, which is garbage if its // AppDomain is unloading. // // Another AV was encountered in a similar case, // // MyRefType1<MyRefType2>[] myArray // // where MyRefType2's module was unloaded by the time the GC occurred. In at least // one case, the GC was caused by the AD unload itself (AppDomain::Unload -> // AppDomain::Exit -> GCInterface::AddMemoryPressure -> WKS::GCHeapUtilities::GarbageCollect). // // To protect against all scenarios, verify that // // * The MT of the object is not getting unloaded, OR // * In the case of arrays (potentially of arrays of arrays of arrays ...), the // MT of the innermost element is not getting unloaded. This then ensures the // MT of the original object (i.e., array) itself must not be getting // unloaded either, since the MTs of arrays and of their elements are // allocated on the same loader allocator. Module * pLoaderModule = pMT->GetLoaderModule(); // Don't look up types that are unloading due to Collectible Assemblies. Haven't been // able to find a case where we actually encounter objects like this that can cause // problems; however, it seems prudent to add this protection just in case. LoaderAllocator * pLoaderAllocator = pLoaderModule->GetLoaderAllocator(); _ASSERTE(pLoaderAllocator != NULL); if ((pLoaderAllocator->IsCollectible()) && (ObjectHandleIsNull(pLoaderAllocator->GetLoaderAllocatorObjectHandle()))) { return NULL; } // Ok, it should now be safe to get the type handle return GetGCSafeTypeHandle(); } /* static */ BOOL Object::SupportsInterface(OBJECTREF pObj, MethodTable* pInterfaceMT) { CONTRACTL { THROWS; GC_TRIGGERS; INJECT_FAULT(COMPlusThrowOM()); PRECONDITION(CheckPointer(pInterfaceMT)); PRECONDITION(pObj->GetMethodTable()->IsRestored_NoLogging()); PRECONDITION(pInterfaceMT->IsInterface()); } CONTRACTL_END BOOL bSupportsItf = FALSE; GCPROTECT_BEGIN(pObj) { // Make sure the interface method table has been restored. pInterfaceMT->CheckRestore(); // Check to see if the static class definition indicates we implement the interface. MethodTable * pMT = pObj->GetMethodTable(); if (pMT->CanCastToInterface(pInterfaceMT)) { bSupportsItf = TRUE; } #ifdef FEATURE_COMINTEROP else if (pMT->IsComObjectType()) { // If this is a COM object, the static class definition might not be complete so we need // to check if the COM object implements the interface. 
bSupportsItf = ComObject::SupportsInterface(pObj, pInterfaceMT); } #endif // FEATURE_COMINTEROP } GCPROTECT_END(); return bSupportsItf; } Assembly *AssemblyBaseObject::GetAssembly() { WRAPPER_NO_CONTRACT; return m_pAssembly->GetAssembly(); } STRINGREF AllocateString(SString sstr) { CONTRACTL { THROWS; GC_TRIGGERS; } CONTRACTL_END; COUNT_T length = sstr.GetCount(); // count of WCHARs excluding terminating NULL STRINGREF strObj = AllocateString(length); memcpyNoGCRefs(strObj->GetBuffer(), sstr.GetUnicode(), length*sizeof(WCHAR)); return strObj; } CHARARRAYREF AllocateCharArray(DWORD dwArrayLength) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; return (CHARARRAYREF)AllocatePrimitiveArray(ELEMENT_TYPE_CHAR, dwArrayLength); } void Object::ValidateHeap(BOOL bDeep) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; #if defined (VERIFY_HEAP) //no need to verify next object's header in this case //since this is called in verify_heap, which will verfiy every object anyway Validate(bDeep, FALSE); #endif } void Object::SetOffsetObjectRef(DWORD dwOffset, size_t dwValue) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; STATIC_CONTRACT_MODE_COOPERATIVE; OBJECTREF* location; OBJECTREF o; location = (OBJECTREF *) &GetData()[dwOffset]; o = ObjectToOBJECTREF(*(Object **) &dwValue); SetObjectReference( location, o ); } void SetObjectReferenceUnchecked(OBJECTREF *dst,OBJECTREF ref) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; STATIC_CONTRACT_MODE_COOPERATIVE; STATIC_CONTRACT_CANNOT_TAKE_LOCK; // Assign value. We use casting to avoid going thru the overloaded // OBJECTREF= operator which in this case would trigger a false // write-barrier violation assert. VolatileStore((Object**)dst, OBJECTREFToObject(ref)); #ifdef _DEBUG Thread::ObjectRefAssign(dst); #endif ErectWriteBarrier(dst, ref); } void STDCALL CopyValueClassUnchecked(void* dest, void* src, MethodTable *pMT) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; STATIC_CONTRACT_MODE_COOPERATIVE; _ASSERTE(!pMT->IsArray()); // bunch of assumptions about arrays wrong. if (pMT->ContainsPointers()) { memmoveGCRefs(dest, src, pMT->GetNumInstanceFieldBytes()); } else { switch (pMT->GetNumInstanceFieldBytes()) { case 1: *(UINT8*)dest = *(UINT8*)src; break; #ifndef ALIGN_ACCESS // we can hit an alignment fault if the value type has multiple // smaller fields. Example: if there are two I4 fields, the // value class can be aligned to 4-byte boundaries, yet the // NumInstanceFieldBytes is 8 case 2: *(UINT16*)dest = *(UINT16*)src; break; case 4: *(UINT32*)dest = *(UINT32*)src; break; case 8: *(UINT64*)dest = *(UINT64*)src; break; #endif // !ALIGN_ACCESS default: memcpyNoGCRefs(dest, src, pMT->GetNumInstanceFieldBytes()); break; } } } // Copy value class into the argument specified by the argDest. 
// The destOffset is nonzero when copying values into Nullable<T>, it is the offset // of the T value inside of the Nullable<T> void STDCALL CopyValueClassArgUnchecked(ArgDestination *argDest, void* src, MethodTable *pMT, int destOffset) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; STATIC_CONTRACT_MODE_COOPERATIVE; #if defined(UNIX_AMD64_ABI) if (argDest->IsStructPassedInRegs()) { argDest->CopyStructToRegisters(src, pMT->GetNumInstanceFieldBytes(), destOffset); return; } #elif defined(TARGET_ARM64) if (argDest->IsHFA()) { argDest->CopyHFAStructToRegister(src, pMT->GetNumInstanceFieldBytes()); return; } #endif // UNIX_AMD64_ABI // destOffset is only valid for Nullable<T> passed in registers _ASSERTE(destOffset == 0); CopyValueClassUnchecked(argDest->GetDestinationAddress(), src, pMT); } // Initialize the value class argument to zeros void InitValueClassArg(ArgDestination *argDest, MethodTable *pMT) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; STATIC_CONTRACT_MODE_COOPERATIVE; #if defined(UNIX_AMD64_ABI) if (argDest->IsStructPassedInRegs()) { argDest->ZeroStructInRegisters(pMT->GetNumInstanceFieldBytes()); return; } #endif InitValueClass(argDest->GetDestinationAddress(), pMT); } #if defined (VERIFY_HEAP) #include "dbginterface.h" // make the checking code goes as fast as possible! #if defined(_MSC_VER) #pragma optimize("tgy", on) #endif #define CREATE_CHECK_STRING(x) #x #define CHECK_AND_TEAR_DOWN(x) \ do{ \ if (!(x)) \ { \ _ASSERTE(!CREATE_CHECK_STRING(x)); \ EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE); \ } \ } while (0) VOID Object::Validate(BOOL bDeep, BOOL bVerifyNextHeader, BOOL bVerifySyncBlock) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; STATIC_CONTRACT_MODE_COOPERATIVE; STATIC_CONTRACT_CANNOT_TAKE_LOCK; if (g_IBCLogger.InstrEnabled() && !GCStress<cfg_any>::IsEnabled()) { // If we are instrumenting for IBC (and GCStress is not enabled) // then skip these Object::Validate() as they slow down the // instrument phase by an order of magnitude return; } if (g_fEEShutDown & ShutDown_Phase2) { // During second phase of shutdown the code below is not guaranteed to work. return; } #ifdef _DEBUG { Thread *pThread = GetThreadNULLOk(); if (pThread != NULL && !(pThread->PreemptiveGCDisabled())) { // Debugger helper threads are special in that they take over for // what would normally be a nonEE thread (the RCThread). If an // EE thread is doing RCThread duty, then it should be treated // as such. // // There are some GC threads in the same kind of category. Note that // GetThread() sometimes returns them, if DLL_THREAD_ATTACH notifications // have run some managed code. if (!dbgOnly_IsSpecialEEThread() && !IsGCSpecialThread()) _ASSERTE(!"OBJECTREF being accessed while thread is in preemptive GC mode."); } } #endif { // ValidateInner can throw or fault on failure which violates contract. CONTRACT_VIOLATION(ThrowsViolation | FaultViolation); // using inner helper because of TRY and stack objects with destructors. 
ValidateInner(bDeep, bVerifyNextHeader, bVerifySyncBlock); } } VOID Object::ValidateInner(BOOL bDeep, BOOL bVerifyNextHeader, BOOL bVerifySyncBlock) { STATIC_CONTRACT_THROWS; // See CONTRACT_VIOLATION above STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FAULT; // See CONTRACT_VIOLATION above STATIC_CONTRACT_MODE_COOPERATIVE; STATIC_CONTRACT_CANNOT_TAKE_LOCK; int lastTest = 0; EX_TRY { // in order to avoid contract violations in the EH code we'll allow AVs here, // they'll be handled in the catch block AVInRuntimeImplOkayHolder avOk; MethodTable *pMT = GetGCSafeMethodTable(); lastTest = 1; CHECK_AND_TEAR_DOWN(pMT && pMT->Validate()); lastTest = 2; bool noRangeChecks = (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_NO_RANGE_CHECKS) == EEConfig::HEAPVERIFY_NO_RANGE_CHECKS; // noRangeChecks depends on initial values being FALSE BOOL bSmallObjectHeapPtr = FALSE, bLargeObjectHeapPtr = FALSE; if (!noRangeChecks) { bSmallObjectHeapPtr = GCHeapUtilities::GetGCHeap()->IsHeapPointer(this, true); if (!bSmallObjectHeapPtr) bLargeObjectHeapPtr = GCHeapUtilities::GetGCHeap()->IsHeapPointer(this); CHECK_AND_TEAR_DOWN(bSmallObjectHeapPtr || bLargeObjectHeapPtr); } lastTest = 3; if (bDeep) { CHECK_AND_TEAR_DOWN(GetHeader()->Validate(bVerifySyncBlock)); } lastTest = 4; if (bDeep && (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC)) { GCHeapUtilities::GetGCHeap()->ValidateObjectMember(this); } lastTest = 5; // since bSmallObjectHeapPtr is initialized to FALSE // we skip checking noRangeChecks since if skipping // is enabled bSmallObjectHeapPtr will always be false. if (bSmallObjectHeapPtr) { CHECK_AND_TEAR_DOWN(!GCHeapUtilities::GetGCHeap()->IsLargeObject(this)); } lastTest = 6; lastTest = 7; _ASSERTE(GCHeapUtilities::IsGCHeapInitialized()); // try to validate next object's header if (bDeep && bVerifyNextHeader && GCHeapUtilities::GetGCHeap()->RuntimeStructuresValid() //NextObj could be very slow if concurrent GC is going on && !GCHeapUtilities::GetGCHeap ()->IsConcurrentGCInProgress ()) { Object * nextObj = GCHeapUtilities::GetGCHeap ()->NextObj (this); if ((nextObj != NULL) && (nextObj->GetGCSafeMethodTable() != g_pFreeObjectMethodTable)) { // we need a read barrier here - to make sure we read the object header _after_ // reading data that tells us that the object is eligible for verification // (also see: gc.cpp/a_fit_segment_end_p) VOLATILE_MEMORY_BARRIER(); CHECK_AND_TEAR_DOWN(nextObj->GetHeader()->Validate(FALSE)); } } lastTest = 8; #ifdef FEATURE_64BIT_ALIGNMENT if (pMT->RequiresAlign8()) { CHECK_AND_TEAR_DOWN((((size_t)this) & 0x7) == (size_t)(pMT->IsValueType()?4:0)); } lastTest = 9; #endif // FEATURE_64BIT_ALIGNMENT } EX_CATCH { STRESS_LOG3(LF_ASSERT, LL_ALWAYS, "Detected use of corrupted OBJECTREF: %p [MT=%p] (lastTest=%d)", this, lastTest > 0 ? (*(size_t*)this) : 0, lastTest); CHECK_AND_TEAR_DOWN(!"Detected use of a corrupted OBJECTREF. Possible GC hole."); } EX_END_CATCH(SwallowAllExceptions); } #endif // VERIFY_HEAP /*==================================NewString=================================== **Action: Creates a System.String object. 
**Returns: **Arguments: **Exceptions: ==============================================================================*/ STRINGREF StringObject::NewString(INT32 length) { CONTRACTL { GC_TRIGGERS; MODE_COOPERATIVE; PRECONDITION(length>=0); } CONTRACTL_END; STRINGREF pString; if (length<0) { return NULL; } else if (length == 0) { return GetEmptyString(); } else { pString = AllocateString(length); _ASSERTE(pString->GetBuffer()[length] == 0); return pString; } } /*==================================NewString=================================== **Action: Many years ago, VB didn't have the concept of a byte array, so enterprising ** users created one by allocating a BSTR with an odd length and using it to ** store bytes. A generation later, we're still stuck supporting this behavior. ** The way that we do this is to take advantage of the difference between the ** array length and the string length. The string length will always be the ** number of characters between the start of the string and the terminating 0. ** If we need an odd number of bytes, we'll take one wchar after the terminating 0. ** (e.g. at position StringLength+1). The high-order byte of this wchar is ** reserved for flags and the low-order byte is our odd byte. This function is ** used to allocate a string of that shape, but we don't actually mark the ** trailing byte as being in use yet. **Returns: A newly allocated string. Null if length is less than 0. **Arguments: length -- the length of the string to allocate ** bHasTrailByte -- whether the string also has a trailing byte. **Exceptions: OutOfMemoryException if AllocateString fails. ==============================================================================*/ STRINGREF StringObject::NewString(INT32 length, BOOL bHasTrailByte) { CONTRACTL { GC_TRIGGERS; MODE_COOPERATIVE; PRECONDITION(length>=0 && length != INT32_MAX); } CONTRACTL_END; STRINGREF pString; if (length<0 || length == INT32_MAX) { return NULL; } else if (length == 0) { return GetEmptyString(); } else { pString = AllocateString(length); _ASSERTE(pString->GetBuffer()[length]==0); if (bHasTrailByte) { _ASSERTE(pString->GetBuffer()[length+1]==0); } } return pString; } //======================================================================== // Creates a System.String object and initializes from // the supplied null-terminated C string. // // Maps NULL to null. This function does *not* return null to indicate // error situations: it throws an exception instead. //======================================================================== STRINGREF StringObject::NewString(const WCHAR *pwsz) { CONTRACTL { GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; if (!pwsz) { return NULL; } else { DWORD nch = (DWORD)wcslen(pwsz); if (nch==0) { return GetEmptyString(); } #if 0 // // This assert is disabled because it is valid for us to get a // pointer from the gc heap here as long as it is pinned. This // can happen when a string is marshalled to unmanaged by // pinning and then later put into a struct and that struct is // then marshalled to managed. 
// _ASSERTE(!GCHeapUtilities::GetGCHeap()->IsHeapPointer((BYTE *) pwsz) || !"pwsz can not point to GC Heap"); #endif // 0 STRINGREF pString = AllocateString( nch ); memcpyNoGCRefs(pString->GetBuffer(), pwsz, nch*sizeof(WCHAR)); _ASSERTE(pString->GetBuffer()[nch] == 0); return pString; } } #if defined(_MSC_VER) && defined(TARGET_X86) #pragma optimize("y", on) // Small critical routines, don't put in EBP frame #endif STRINGREF StringObject::NewString(const WCHAR *pwsz, int length) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; PRECONDITION(length>=0); } CONTRACTL_END; if (!pwsz) { return NULL; } else if (length <= 0) { return GetEmptyString(); } else { #if 0 // // This assert is disabled because it is valid for us to get a // pointer from the gc heap here as long as it is pinned. This // can happen when a string is marshalled to unmanaged by // pinning and then later put into a struct and that struct is // then marshalled to managed. // _ASSERTE(!GCHeapUtilities::GetGCHeap()->IsHeapPointer((BYTE *) pwsz) || !"pwsz can not point to GC Heap"); #endif // 0 STRINGREF pString = AllocateString(length); memcpyNoGCRefs(pString->GetBuffer(), pwsz, length*sizeof(WCHAR)); _ASSERTE(pString->GetBuffer()[length] == 0); return pString; } } #if defined(_MSC_VER) && defined(TARGET_X86) #pragma optimize("", on) // Go back to command line default optimizations #endif STRINGREF StringObject::NewString(LPCUTF8 psz) { CONTRACTL { GC_TRIGGERS; MODE_COOPERATIVE; THROWS; PRECONDITION(CheckPointer(psz)); } CONTRACTL_END; int length = (int)strlen(psz); if (length == 0) { return GetEmptyString(); } CQuickBytes qb; WCHAR* pwsz = (WCHAR*) qb.AllocThrows((length) * sizeof(WCHAR)); length = WszMultiByteToWideChar(CP_UTF8, 0, psz, length, pwsz, length); if (length == 0) { COMPlusThrow(kArgumentException, W("Arg_InvalidUTF8String")); } return NewString(pwsz, length); } STRINGREF StringObject::NewString(LPCUTF8 psz, int cBytes) { CONTRACTL { GC_TRIGGERS; MODE_COOPERATIVE; THROWS; PRECONDITION(CheckPointer(psz, NULL_OK)); } CONTRACTL_END; if (!psz) return NULL; _ASSERTE(psz); _ASSERTE(cBytes >= 0); if (cBytes == 0) { return GetEmptyString(); } int cWszBytes = 0; if (!ClrSafeInt<int>::multiply(cBytes, sizeof(WCHAR), cWszBytes)) COMPlusThrowOM(); CQuickBytes qb; WCHAR* pwsz = (WCHAR*) qb.AllocThrows(cWszBytes); int length = WszMultiByteToWideChar(CP_UTF8, 0, psz, cBytes, pwsz, cBytes); if (length == 0) { COMPlusThrow(kArgumentException, W("Arg_InvalidUTF8String")); } return NewString(pwsz, length); } // // // STATIC MEMBER VARIABLES // // STRINGREF* StringObject::EmptyStringRefPtr=NULL; //The special string helpers are used as flag bits for weird strings that have bytes //after the terminating 0. The only case where we use this right now is the VB BSTR as //byte array which is described in MakeStringAsByteArrayFromBytes. #define SPECIAL_STRING_VB_BYTE_ARRAY 0x100 FORCEINLINE BOOL MARKS_VB_BYTE_ARRAY(WCHAR x) { return static_cast<BOOL>(x & SPECIAL_STRING_VB_BYTE_ARRAY); } FORCEINLINE WCHAR MAKE_VB_TRAIL_BYTE(BYTE x) { return static_cast<WCHAR>(x) | SPECIAL_STRING_VB_BYTE_ARRAY; } FORCEINLINE BYTE GET_VB_TRAIL_BYTE(WCHAR x) { return static_cast<BYTE>(x & 0xFF); } /*==============================InitEmptyStringRefPtr============================ **Action: Gets an empty string refptr, cache the result. **Returns: The retrieved STRINGREF. 
==============================================================================*/ STRINGREF* StringObject::InitEmptyStringRefPtr() { CONTRACTL { THROWS; MODE_ANY; GC_TRIGGERS; } CONTRACTL_END; GCX_COOP(); EEStringData data(0, W(""), TRUE); EmptyStringRefPtr = SystemDomain::System()->DefaultDomain()->GetLoaderAllocator()->GetStringObjRefPtrFromUnicodeString(&data); return EmptyStringRefPtr; } // strAChars must be null-terminated, with an appropriate aLength // strBChars must be null-terminated, with an appropriate bLength OR bLength == -1 // If bLength == -1, we stop on the first null character in strBChars BOOL StringObject::CaseInsensitiveCompHelper(_In_reads_(aLength) WCHAR *strAChars, _In_z_ INT8 *strBChars, INT32 aLength, INT32 bLength, INT32 *result) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; PRECONDITION(CheckPointer(strAChars)); PRECONDITION(CheckPointer(strBChars)); PRECONDITION(CheckPointer(result)); } CONTRACTL_END; WCHAR *strAStart = strAChars; INT8 *strBStart = strBChars; unsigned charA; unsigned charB; for(;;) { charA = *strAChars; charB = (unsigned) *strBChars; //Case-insensitive comparison on chars greater than 0x7F //requires a locale-aware casing operation and we're not going there. if ((charA|charB)>0x7F) { *result = 0; return FALSE; } // uppercase both chars. if (charA>='a' && charA<='z') { charA ^= 0x20; } if (charB>='a' && charB<='z') { charB ^= 0x20; } //Return the (case-insensitive) difference between them. if (charA!=charB) { *result = (int)(charA-charB); return TRUE; } if (charA==0) // both strings have null character { if (bLength == -1) { *result = aLength - static_cast<INT32>(strAChars - strAStart); return TRUE; } if (strAChars==strAStart + aLength || strBChars==strBStart + bLength) { *result = aLength - bLength; return TRUE; } // else both embedded zeros } // Next char strAChars++; strBChars++; } } /*============================InternalTrailByteCheck============================ **Action: Many years ago, VB didn't have the concept of a byte array, so enterprising ** users created one by allocating a BSTR with an odd length and using it to ** store bytes. A generation later, we're still stuck supporting this behavior. ** The way that we do this is stick the trail byte in the sync block ** whenever we encounter such a situation. Since we expect this to be a very corner case ** accessing the sync block seems like a good enough solution ** **Returns: True if <CODE>str</CODE> contains a VB trail byte, false otherwise. **Arguments: str -- The string to be examined. **Exceptions: None ==============================================================================*/ BOOL StringObject::HasTrailByte() { WRAPPER_NO_CONTRACT; SyncBlock * pSyncBlock = PassiveGetSyncBlock(); if(pSyncBlock != NULL) { return pSyncBlock->HasCOMBstrTrailByte(); } return FALSE; } /*=================================GetTrailByte================================= **Action: If <CODE>str</CODE> contains a vb trail byte, returns a copy of it. **Returns: True if <CODE>str</CODE> contains a trail byte. *bTrailByte is set to ** the byte in question if <CODE>str</CODE> does have a trail byte, otherwise ** it's set to 0. **Arguments: str -- The string being examined. ** bTrailByte -- An out param to hold the value of the trail byte. **Exceptions: None. 
==============================================================================*/ BOOL StringObject::GetTrailByte(BYTE *bTrailByte) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; _ASSERTE(bTrailByte); *bTrailByte=0; BOOL retValue = HasTrailByte(); if(retValue) { *bTrailByte = GET_VB_TRAIL_BYTE(GetHeader()->PassiveGetSyncBlock()->GetCOMBstrTrailByte()); } return retValue; } /*=================================SetTrailByte================================= **Action: Sets the trail byte in the sync block **Returns: True. **Arguments: str -- The string into which to set the trail byte. ** bTrailByte -- The trail byte to be added to the string. **Exceptions: None. ==============================================================================*/ BOOL StringObject::SetTrailByte(BYTE bTrailByte) { WRAPPER_NO_CONTRACT; GetHeader()->GetSyncBlock()->SetCOMBstrTrailByte(MAKE_VB_TRAIL_BYTE(bTrailByte)); return TRUE; } #ifdef USE_CHECKED_OBJECTREFS //------------------------------------------------------------- // Default constructor, for non-initializing declarations: // // OBJECTREF or; //------------------------------------------------------------- OBJECTREF::OBJECTREF() { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; m_asObj = (Object*)POISONC; Thread::ObjectRefNew(this); } //------------------------------------------------------------- // Copy constructor, for passing OBJECTREF's as function arguments. //------------------------------------------------------------- OBJECTREF::OBJECTREF(const OBJECTREF & objref) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_MODE_COOPERATIVE; STATIC_CONTRACT_FORBID_FAULT; VALIDATEOBJECT(objref.m_asObj); // !!! If this assert is fired, there are two possibilities: // !!! 1. You are doing a type cast, e.g. *(OBJECTREF*)pObj // !!! Instead, you should use ObjectToOBJECTREF(*(Object**)pObj), // !!! or ObjectToSTRINGREF(*(StringObject**)pObj) // !!! 2. There is a real GC hole here. // !!! Either way you need to fix the code. _ASSERTE(Thread::IsObjRefValid(&objref)); if ((objref.m_asObj != 0) && ((IGCHeap*)GCHeapUtilities::GetGCHeap())->IsHeapPointer( (BYTE*)this )) { _ASSERTE(!"Write Barrier violation. Must use SetObjectReference() to assign OBJECTREF's into the GC heap!"); } m_asObj = objref.m_asObj; if (m_asObj != 0) { ENABLESTRESSHEAP(); } Thread::ObjectRefNew(this); } //------------------------------------------------------------- // VolatileLoadWithoutBarrier constructor //------------------------------------------------------------- OBJECTREF::OBJECTREF(const OBJECTREF *pObjref, tagVolatileLoadWithoutBarrier tag) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_MODE_COOPERATIVE; STATIC_CONTRACT_FORBID_FAULT; Object* objrefAsObj = VolatileLoadWithoutBarrier(&pObjref->m_asObj); VALIDATEOBJECT(objrefAsObj); // !!! If this assert is fired, there are two possibilities: // !!! 1. You are doing a type cast, e.g. *(OBJECTREF*)pObj // !!! Instead, you should use ObjectToOBJECTREF(*(Object**)pObj), // !!! or ObjectToSTRINGREF(*(StringObject**)pObj) // !!! 2. There is a real GC hole here. // !!! Either way you need to fix the code. _ASSERTE(Thread::IsObjRefValid(pObjref)); if ((objrefAsObj != 0) && ((IGCHeap*)GCHeapUtilities::GetGCHeap())->IsHeapPointer( (BYTE*)this )) { _ASSERTE(!"Write Barrier violation. 
Must use SetObjectReference() to assign OBJECTREF's into the GC heap!"); } m_asObj = objrefAsObj; if (m_asObj != 0) { ENABLESTRESSHEAP(); } Thread::ObjectRefNew(this); } //------------------------------------------------------------- // To allow NULL to be used as an OBJECTREF. //------------------------------------------------------------- OBJECTREF::OBJECTREF(TADDR nul) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; //_ASSERTE(nul == 0); m_asObj = (Object*)nul; if( m_asObj != NULL) { // REVISIT_TODO: fix this, why is this constructor being used for non-null object refs? STATIC_CONTRACT_VIOLATION(ModeViolation); VALIDATEOBJECT(m_asObj); ENABLESTRESSHEAP(); } Thread::ObjectRefNew(this); } //------------------------------------------------------------- // This is for the GC's use only. Non-GC code should never // use the "Object" class directly. The unused "int" argument // prevents C++ from using this to implicitly convert Object*'s // to OBJECTREF. //------------------------------------------------------------- OBJECTREF::OBJECTREF(Object *pObject) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_MODE_COOPERATIVE; STATIC_CONTRACT_FORBID_FAULT; DEBUG_ONLY_FUNCTION; if ((pObject != 0) && ((IGCHeap*)GCHeapUtilities::GetGCHeap())->IsHeapPointer( (BYTE*)this )) { _ASSERTE(!"Write Barrier violation. Must use SetObjectReference() to assign OBJECTREF's into the GC heap!"); } m_asObj = pObject; VALIDATEOBJECT(m_asObj); if (m_asObj != 0) { ENABLESTRESSHEAP(); } Thread::ObjectRefNew(this); } void OBJECTREF::Validate(BOOL bDeep, BOOL bVerifyNextHeader, BOOL bVerifySyncBlock) { LIMITED_METHOD_CONTRACT; if (m_asObj) { m_asObj->Validate(bDeep, bVerifyNextHeader, bVerifySyncBlock); } } //------------------------------------------------------------- // Test against NULL. //------------------------------------------------------------- int OBJECTREF::operator!() const { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; // We don't do any validation here, as we want to allow zero comparison in preemptive mode return !m_asObj; } //------------------------------------------------------------- // Compare two OBJECTREF's. //------------------------------------------------------------- int OBJECTREF::operator==(const OBJECTREF &objref) const { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; if (objref.m_asObj != NULL) // Allow comparison to zero in preemptive mode { // REVISIT_TODO: Weakening the contract system a little bit here. We should really // add a special NULLOBJECTREF which can be used for these situations and have // a seperate code path for that with the correct contract protections. STATIC_CONTRACT_VIOLATION(ModeViolation); VALIDATEOBJECT(objref.m_asObj); // !!! If this assert is fired, there are two possibilities: // !!! 1. You are doing a type cast, e.g. *(OBJECTREF*)pObj // !!! Instead, you should use ObjectToOBJECTREF(*(Object**)pObj), // !!! or ObjectToSTRINGREF(*(StringObject**)pObj) // !!! 2. There is a real GC hole here. // !!! Either way you need to fix the code. _ASSERTE(Thread::IsObjRefValid(&objref)); VALIDATEOBJECT(m_asObj); // If this assert fires, you probably did not protect // your OBJECTREF and a GC might have occurred. 
To // where the possible GC was, set a breakpoint in Thread::TriggersGC _ASSERTE(Thread::IsObjRefValid(this)); if (m_asObj != 0 || objref.m_asObj != 0) { ENABLESTRESSHEAP(); } } return m_asObj == objref.m_asObj; } //------------------------------------------------------------- // Compare two OBJECTREF's. //------------------------------------------------------------- int OBJECTREF::operator!=(const OBJECTREF &objref) const { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; if (objref.m_asObj != NULL) // Allow comparison to zero in preemptive mode { // REVISIT_TODO: Weakening the contract system a little bit here. We should really // add a special NULLOBJECTREF which can be used for these situations and have // a seperate code path for that with the correct contract protections. STATIC_CONTRACT_VIOLATION(ModeViolation); VALIDATEOBJECT(objref.m_asObj); // !!! If this assert is fired, there are two possibilities: // !!! 1. You are doing a type cast, e.g. *(OBJECTREF*)pObj // !!! Instead, you should use ObjectToOBJECTREF(*(Object**)pObj), // !!! or ObjectToSTRINGREF(*(StringObject**)pObj) // !!! 2. There is a real GC hole here. // !!! Either way you need to fix the code. _ASSERTE(Thread::IsObjRefValid(&objref)); VALIDATEOBJECT(m_asObj); // If this assert fires, you probably did not protect // your OBJECTREF and a GC might have occurred. To // where the possible GC was, set a breakpoint in Thread::TriggersGC _ASSERTE(Thread::IsObjRefValid(this)); if (m_asObj != 0 || objref.m_asObj != 0) { ENABLESTRESSHEAP(); } } return m_asObj != objref.m_asObj; } //------------------------------------------------------------- // Forward method calls. //------------------------------------------------------------- Object* OBJECTREF::operator->() { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; VALIDATEOBJECT(m_asObj); // If this assert fires, you probably did not protect // your OBJECTREF and a GC might have occurred. To // where the possible GC was, set a breakpoint in Thread::TriggersGC _ASSERTE(Thread::IsObjRefValid(this)); if (m_asObj != 0) { ENABLESTRESSHEAP(); } // if you are using OBJECTREF directly, // you probably want an Object * return (Object *)m_asObj; } //------------------------------------------------------------- // Forward method calls. //------------------------------------------------------------- const Object* OBJECTREF::operator->() const { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; VALIDATEOBJECT(m_asObj); // If this assert fires, you probably did not protect // your OBJECTREF and a GC might have occurred. To // where the possible GC was, set a breakpoint in Thread::TriggersGC _ASSERTE(Thread::IsObjRefValid(this)); if (m_asObj != 0) { ENABLESTRESSHEAP(); } // if you are using OBJECTREF directly, // you probably want an Object * return (Object *)m_asObj; } //------------------------------------------------------------- // Assignment. We don't validate the destination so as not // to break the sequence: // // OBJECTREF or; // or = ...; //------------------------------------------------------------- OBJECTREF& OBJECTREF::operator=(const OBJECTREF &objref) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; VALIDATEOBJECT(objref.m_asObj); // !!! If this assert is fired, there are two possibilities: // !!! 1. You are doing a type cast, e.g. *(OBJECTREF*)pObj // !!! Instead, you should use ObjectToOBJECTREF(*(Object**)pObj), // !!! 
or ObjectToSTRINGREF(*(StringObject**)pObj) // !!! 2. There is a real GC hole here. // !!! Either way you need to fix the code. _ASSERTE(Thread::IsObjRefValid(&objref)); if ((objref.m_asObj != 0) && ((IGCHeap*)GCHeapUtilities::GetGCHeap())->IsHeapPointer( (BYTE*)this )) { _ASSERTE(!"Write Barrier violation. Must use SetObjectReference() to assign OBJECTREF's into the GC heap!"); } Thread::ObjectRefAssign(this); m_asObj = objref.m_asObj; if (m_asObj != 0) { ENABLESTRESSHEAP(); } return *this; } //------------------------------------------------------------- // Allows for the assignment of NULL to a OBJECTREF //------------------------------------------------------------- OBJECTREF& OBJECTREF::operator=(TADDR nul) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; _ASSERTE(nul == 0); Thread::ObjectRefAssign(this); m_asObj = (Object*)nul; if (m_asObj != 0) { ENABLESTRESSHEAP(); } return *this; } #endif // DEBUG #ifdef _DEBUG void* __cdecl GCSafeMemCpy(void * dest, const void * src, size_t len) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; if (!(((*(BYTE**)&dest) < g_lowest_address ) || ((*(BYTE**)&dest) >= g_highest_address))) { Thread* pThread = GetThreadNULLOk(); // GCHeapUtilities::IsHeapPointer has race when called in preemptive mode. It walks the list of segments // that can be modified by GC. Do the check below only if it is safe to do so. if (pThread != NULL && pThread->PreemptiveGCDisabled()) { // Note there is memcpyNoGCRefs which will allow you to do a memcpy into the GC // heap if you really know you don't need to call the write barrier _ASSERTE(!GCHeapUtilities::GetGCHeap()->IsHeapPointer((BYTE *) dest) || !"using memcpy to copy into the GC heap, use CopyValueClass"); } } return memcpyNoGCRefs(dest, src, len); } #endif // _DEBUG // This function clears a piece of memory in a GC safe way. It makes the guarantee // that it will clear memory in at least pointer sized chunks whenever possible. // Unaligned memory at the beginning and remaining bytes at the end are written bytewise. // We must make this guarantee whenever we clear memory in the GC heap that could contain // object references. The GC or other user threads can read object references at any time, // clearing them bytewise can result in a read on another thread getting incorrect data. 
void __fastcall ZeroMemoryInGCHeap(void* mem, size_t size) { WRAPPER_NO_CONTRACT; BYTE* memBytes = (BYTE*) mem; BYTE* endBytes = &memBytes[size]; // handle unaligned bytes at the beginning while (!IS_ALIGNED(memBytes, sizeof(PTR_PTR_VOID)) && memBytes < endBytes) *memBytes++ = 0; // now write pointer sized pieces // volatile ensures that this doesn't get optimized back into a memset call size_t nPtrs = (endBytes - memBytes) / sizeof(PTR_PTR_VOID); PTR_VOID volatile * memPtr = (PTR_PTR_VOID) memBytes; for (size_t i = 0; i < nPtrs; i++) *memPtr++ = 0; // handle remaining bytes at the end memBytes = (BYTE*) memPtr; while (memBytes < endBytes) *memBytes++ = 0; } void StackTraceArray::Append(StackTraceElement const * begin, StackTraceElement const * end) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; PRECONDITION(IsProtectedByGCFrame((OBJECTREF*)this)); } CONTRACTL_END; // ensure that only one thread can write to the array EnsureThreadAffinity(); size_t newsize = Size() + (end - begin); Grow(newsize); memcpyNoGCRefs(GetData() + Size(), begin, (end - begin) * sizeof(StackTraceElement)); MemoryBarrier(); // prevent the newsize from being reordered with the array copy SetSize(newsize); #if defined(_DEBUG) CheckState(); #endif } void StackTraceArray::CheckState() const { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_COOPERATIVE; } CONTRACTL_END; if (!m_array) return; assert(GetObjectThread() == GetThreadNULLOk()); size_t size = Size(); StackTraceElement const * p; p = GetData(); for (size_t i = 0; i < size; ++i) assert(p[i].pFunc != NULL); } void StackTraceArray::Grow(size_t grow_size) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; INJECT_FAULT(ThrowOutOfMemory();); PRECONDITION(IsProtectedByGCFrame((OBJECTREF*)this)); } CONTRACTL_END; size_t raw_size = grow_size * sizeof(StackTraceElement) + sizeof(ArrayHeader); if (!m_array) { SetArray(I1ARRAYREF(AllocatePrimitiveArray(ELEMENT_TYPE_I1, static_cast<DWORD>(raw_size)))); SetSize(0); SetObjectThread(); } else { if (Capacity() >= raw_size) return; // allocate a new array, copy the data size_t new_capacity = Max(Capacity() * 2, raw_size); _ASSERTE(new_capacity >= grow_size * sizeof(StackTraceElement) + sizeof(ArrayHeader)); I1ARRAYREF newarr = (I1ARRAYREF) AllocatePrimitiveArray(ELEMENT_TYPE_I1, static_cast<DWORD>(new_capacity)); memcpyNoGCRefs(newarr->GetDirectPointerToNonObjectElements(), GetRaw(), Size() * sizeof(StackTraceElement) + sizeof(ArrayHeader)); SetArray(newarr); } } void StackTraceArray::EnsureThreadAffinity() { WRAPPER_NO_CONTRACT; if (!m_array) return; if (GetObjectThread() != GetThreadNULLOk()) { // object is being changed by a thread different from the one which created it // make a copy of the array to prevent a race condition when two different threads try to change it StackTraceArray copy; GCPROTECT_BEGIN(copy); copy.CopyFrom(*this); this->Swap(copy); GCPROTECT_END(); } } // Deep copies the stack trace array void StackTraceArray::CopyFrom(StackTraceArray const & src) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; INJECT_FAULT(ThrowOutOfMemory();); PRECONDITION(IsProtectedByGCFrame((OBJECTREF*)this)); PRECONDITION(IsProtectedByGCFrame((OBJECTREF*)&src)); } CONTRACTL_END; m_array = (I1ARRAYREF) AllocatePrimitiveArray(ELEMENT_TYPE_I1, static_cast<DWORD>(src.Capacity())); Volatile<size_t> size = src.Size(); memcpyNoGCRefs(GetRaw(), src.GetRaw(), size * sizeof(StackTraceElement) + sizeof(ArrayHeader)); SetSize(size); // set size to the exact value which was used when we copied the data // another thread might have changed it 
at the time of copying SetObjectThread(); // affinitize the newly created array with the current thread } #ifdef _DEBUG //=============================================================================== // Code that insures that our unmanaged version of Nullable is consistant with // the managed version Nullable<T> for all T. void Nullable::CheckFieldOffsets(TypeHandle nullableType) { LIMITED_METHOD_CONTRACT; /*** // The non-instantiated method tables like List<T> that are used // by reflection and verification do not have correct field offsets // but we never make instances of these anyway. if (nullableMT->ContainsGenericVariables()) return; ***/ MethodTable* nullableMT = nullableType.GetMethodTable(); // insure that the managed version of the table is the same as the // unmanaged. Note that we can't do this in corelib.h because this // class is generic and field layout depends on the instantiation. _ASSERTE(nullableMT->GetNumInstanceFields() == 2); FieldDesc* field = nullableMT->GetApproxFieldDescListRaw(); _ASSERTE(strcmp(field->GetDebugName(), "hasValue") == 0); // _ASSERTE(field->GetOffset() == offsetof(Nullable, hasValue)); field++; _ASSERTE(strcmp(field->GetDebugName(), "value") == 0); // _ASSERTE(field->GetOffset() == offsetof(Nullable, value)); } #endif //=============================================================================== // Returns true if nullableMT is Nullable<T> for T is equivalent to paramMT BOOL Nullable::IsNullableForTypeHelper(MethodTable* nullableMT, MethodTable* paramMT) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_ANY; } CONTRACTL_END; if (!nullableMT->IsNullable()) return FALSE; // we require the parameter types to be equivalent return TypeHandle(paramMT).IsEquivalentTo(nullableMT->GetInstantiation()[0]); } //=============================================================================== // Returns true if nullableMT is Nullable<T> for T == paramMT BOOL Nullable::IsNullableForTypeHelperNoGC(MethodTable* nullableMT, MethodTable* paramMT) { LIMITED_METHOD_CONTRACT; if (!nullableMT->IsNullable()) return FALSE; // we require an exact match of the parameter types return TypeHandle(paramMT) == nullableMT->GetInstantiation()[0]; } //=============================================================================== CLR_BOOL* Nullable::HasValueAddr(MethodTable* nullableMT) { LIMITED_METHOD_CONTRACT; _ASSERTE(strcmp(nullableMT->GetApproxFieldDescListRaw()[0].GetDebugName(), "hasValue") == 0); _ASSERTE(nullableMT->GetApproxFieldDescListRaw()[0].GetOffset() == 0); return (CLR_BOOL*) this; } //=============================================================================== void* Nullable::ValueAddr(MethodTable* nullableMT) { LIMITED_METHOD_CONTRACT; _ASSERTE(strcmp(nullableMT->GetApproxFieldDescListRaw()[1].GetDebugName(), "value") == 0); return (((BYTE*) this) + nullableMT->GetApproxFieldDescListRaw()[1].GetOffset()); } //=============================================================================== // Special Logic to box a nullable<T> as a boxed<T> OBJECTREF Nullable::Box(void* srcPtr, MethodTable* nullableMT) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; FAULT_NOT_FATAL(); // FIX_NOW: why do we need this? 
Nullable* src = (Nullable*) srcPtr; _ASSERTE(IsNullableType(nullableMT)); // We better have a concrete instantiation, or our field offset asserts are not useful _ASSERTE(!nullableMT->ContainsGenericVariables()); if (!*src->HasValueAddr(nullableMT)) return NULL; OBJECTREF obj = 0; GCPROTECT_BEGININTERIOR (src); MethodTable* argMT = nullableMT->GetInstantiation()[0].AsMethodTable(); obj = argMT->Allocate(); CopyValueClass(obj->UnBox(), src->ValueAddr(nullableMT), argMT); GCPROTECT_END (); return obj; } //=============================================================================== // Special Logic to unbox a boxed T as a nullable<T> BOOL Nullable::UnBox(void* destPtr, OBJECTREF boxedVal, MethodTable* destMT) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; Nullable* dest = (Nullable*) destPtr; BOOL fRet = TRUE; // We should only get here if we are unboxing a T as a Nullable<T> _ASSERTE(IsNullableType(destMT)); // We better have a concrete instantiation, or our field offset asserts are not useful _ASSERTE(!destMT->ContainsGenericVariables()); if (boxedVal == NULL) { // Logically we are doing *dest->HasValueAddr(destMT) = false; // We zero out the whole structure becasue it may contain GC references // and these need to be initialized to zero. (could optimize in the non-GC case) InitValueClass(destPtr, destMT); fRet = TRUE; } else { GCPROTECT_BEGIN(boxedVal); if (!IsNullableForType(destMT, boxedVal->GetMethodTable())) { // For safety's sake, also allow true nullables to be unboxed normally. // This should not happen normally, but we want to be robust if (destMT->IsEquivalentTo(boxedVal->GetMethodTable())) { CopyValueClass(dest, boxedVal->GetData(), destMT); fRet = TRUE; } else { fRet = FALSE; } } else { *dest->HasValueAddr(destMT) = true; CopyValueClass(dest->ValueAddr(destMT), boxedVal->UnBox(), boxedVal->GetMethodTable()); fRet = TRUE; } GCPROTECT_END(); } return fRet; } //=============================================================================== // Special Logic to unbox a boxed T as a nullable<T> // Does not handle type equivalence (may conservatively return FALSE) BOOL Nullable::UnBoxNoGC(void* destPtr, OBJECTREF boxedVal, MethodTable* destMT) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_COOPERATIVE; } CONTRACTL_END; Nullable* dest = (Nullable*) destPtr; // We should only get here if we are unboxing a T as a Nullable<T> _ASSERTE(IsNullableType(destMT)); // We better have a concrete instantiation, or our field offset asserts are not useful _ASSERTE(!destMT->ContainsGenericVariables()); if (boxedVal == NULL) { // Logically we are doing *dest->HasValueAddr(destMT) = false; // We zero out the whole structure becasue it may contain GC references // and these need to be initialized to zero. (could optimize in the non-GC case) InitValueClass(destPtr, destMT); } else { if (!IsNullableForTypeNoGC(destMT, boxedVal->GetMethodTable())) { // For safety's sake, also allow true nullables to be unboxed normally. // This should not happen normally, but we want to be robust if (destMT == boxedVal->GetMethodTable()) { CopyValueClass(dest, boxedVal->GetData(), destMT); return TRUE; } return FALSE; } *dest->HasValueAddr(destMT) = true; CopyValueClass(dest->ValueAddr(destMT), boxedVal->UnBox(), boxedVal->GetMethodTable()); } return TRUE; } //=============================================================================== // Special Logic to unbox a boxed T as a nullable<T> into an argument // specified by the argDest. 
// Does not handle type equivalence (may conservatively return FALSE) BOOL Nullable::UnBoxIntoArgNoGC(ArgDestination *argDest, OBJECTREF boxedVal, MethodTable* destMT) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_COOPERATIVE; } CONTRACTL_END; #if defined(UNIX_AMD64_ABI) if (argDest->IsStructPassedInRegs()) { // We should only get here if we are unboxing a T as a Nullable<T> _ASSERTE(IsNullableType(destMT)); // We better have a concrete instantiation, or our field offset asserts are not useful _ASSERTE(!destMT->ContainsGenericVariables()); if (boxedVal == NULL) { // Logically we are doing *dest->HasValueAddr(destMT) = false; // We zero out the whole structure becasue it may contain GC references // and these need to be initialized to zero. (could optimize in the non-GC case) InitValueClassArg(argDest, destMT); } else { if (!IsNullableForTypeNoGC(destMT, boxedVal->GetMethodTable())) { // For safety's sake, also allow true nullables to be unboxed normally. // This should not happen normally, but we want to be robust if (destMT == boxedVal->GetMethodTable()) { CopyValueClassArg(argDest, boxedVal->GetData(), destMT, 0); return TRUE; } return FALSE; } Nullable* dest = (Nullable*)argDest->GetStructGenRegDestinationAddress(); *dest->HasValueAddr(destMT) = true; int destOffset = (BYTE*)dest->ValueAddr(destMT) - (BYTE*)dest; CopyValueClassArg(argDest, boxedVal->UnBox(), boxedVal->GetMethodTable(), destOffset); } return TRUE; } #endif // UNIX_AMD64_ABI return UnBoxNoGC(argDest->GetDestinationAddress(), boxedVal, destMT); } //=============================================================================== // Special Logic to unbox a boxed T as a nullable<T> // Does not do any type checks. void Nullable::UnBoxNoCheck(void* destPtr, OBJECTREF boxedVal, MethodTable* destMT) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_COOPERATIVE; } CONTRACTL_END; Nullable* dest = (Nullable*) destPtr; // We should only get here if we are unboxing a T as a Nullable<T> _ASSERTE(IsNullableType(destMT)); // We better have a concrete instantiation, or our field offset asserts are not useful _ASSERTE(!destMT->ContainsGenericVariables()); if (boxedVal == NULL) { // Logically we are doing *dest->HasValueAddr(destMT) = false; // We zero out the whole structure becasue it may contain GC references // and these need to be initialized to zero. (could optimize in the non-GC case) InitValueClass(destPtr, destMT); } else { if (IsNullableType(boxedVal->GetMethodTable())) { // For safety's sake, also allow true nullables to be unboxed normally. // This should not happen normally, but we want to be robust CopyValueClass(dest, boxedVal->GetData(), destMT); } *dest->HasValueAddr(destMT) = true; CopyValueClass(dest->ValueAddr(destMT), boxedVal->UnBox(), boxedVal->GetMethodTable()); } } //=============================================================================== // a boxed Nullable<T> should either be null or a boxed T, but sometimes it is // useful to have a 'true' boxed Nullable<T> (that is it has two fields). This // function returns a 'normalized' version of this pointer. 
OBJECTREF Nullable::NormalizeBox(OBJECTREF obj) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; if (obj != NULL) { MethodTable* retMT = obj->GetMethodTable(); if (Nullable::IsNullableType(retMT)) obj = Nullable::Box(obj->GetData(), retMT); } return obj; } void ThreadBaseObject::SetInternal(Thread *it) { WRAPPER_NO_CONTRACT; // only allow a transition from NULL to non-NULL _ASSERTE((m_InternalThread == NULL) && (it != NULL)); m_InternalThread = it; // Now the native Thread will only be destroyed after the managed Thread is collected. // Tell the GC that the managed Thread actually represents much more memory. GCInterface::AddMemoryPressure(sizeof(Thread)); } void ThreadBaseObject::ClearInternal() { WRAPPER_NO_CONTRACT; _ASSERTE(m_InternalThread != NULL); m_InternalThread = NULL; GCInterface::RemoveMemoryPressure(sizeof(Thread)); } #endif // #ifndef DACCESS_COMPILE StackTraceElement const & StackTraceArray::operator[](size_t index) const { WRAPPER_NO_CONTRACT; return GetData()[index]; } StackTraceElement & StackTraceArray::operator[](size_t index) { WRAPPER_NO_CONTRACT; return GetData()[index]; } #if !defined(DACCESS_COMPILE) // Define the lock used to access stacktrace from an exception object SpinLock g_StackTraceArrayLock; void ExceptionObject::SetStackTrace(I1ARRAYREF stackTrace, PTRARRAYREF dynamicMethodArray) { CONTRACTL { GC_NOTRIGGER; NOTHROW; MODE_COOPERATIVE; } CONTRACTL_END; #ifdef STRESS_LOG if (StressLog::StressLogOn(~0u, 0)) { StressLog::CreateThreadStressLog(); } #endif SpinLock::AcquireLock(&g_StackTraceArrayLock); SetObjectReference((OBJECTREF*)&_stackTrace, (OBJECTREF)stackTrace); SetObjectReference((OBJECTREF*)&_dynamicMethods, (OBJECTREF)dynamicMethodArray); SpinLock::ReleaseLock(&g_StackTraceArrayLock); } #endif // !defined(DACCESS_COMPILE) void ExceptionObject::GetStackTrace(StackTraceArray & stackTrace, PTRARRAYREF * outDynamicMethodArray /*= NULL*/) const { CONTRACTL { GC_NOTRIGGER; NOTHROW; MODE_COOPERATIVE; } CONTRACTL_END; #if !defined(DACCESS_COMPILE) SpinLock::AcquireLock(&g_StackTraceArrayLock); #endif // !defined(DACCESS_COMPILE) StackTraceArray temp(_stackTrace); stackTrace.Swap(temp); if (outDynamicMethodArray != NULL) { *outDynamicMethodArray = _dynamicMethods; } #if !defined(DACCESS_COMPILE) SpinLock::ReleaseLock(&g_StackTraceArrayLock); #endif // !defined(DACCESS_COMPILE) } bool LAHashDependentHashTrackerObject::IsLoaderAllocatorLive() { return (ObjectFromHandle(_dependentHandle) != NULL); } void LAHashDependentHashTrackerObject::GetDependentAndLoaderAllocator(OBJECTREF *pLoaderAllocatorRef, GCHEAPHASHOBJECTREF *pGCHeapHash) { OBJECTREF primary = ObjectFromHandle(_dependentHandle); if (pLoaderAllocatorRef != NULL) *pLoaderAllocatorRef = primary; IGCHandleManager *mgr = GCHandleUtilities::GetGCHandleManager(); // Secondary is tracked only if primary is non-null if (pGCHeapHash != NULL) *pGCHeapHash = (GCHEAPHASHOBJECTREF)(OBJECTREF)((primary != NULL) ? mgr->GetDependentHandleSecondary(_dependentHandle) : NULL); }
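A side note on StringObject::CaseInsensitiveCompHelper above: it only folds case for ASCII characters (anything above 0x7F makes it give up and return FALSE), relying on the fact that 'a'..'z' and 'A'..'Z' differ only in bit 0x20. Below is a minimal standalone C++ sketch of that ASCII-only folding, written purely for illustration; the helper name and the main() driver are invented and are not part of the runtime.

```cpp
#include <cassert>

// Hypothetical stand-alone illustration of the charA ^= 0x20 trick used by
// CaseInsensitiveCompHelper: ASCII letters 'a'..'z' (0x61..0x7A) map onto
// 'A'..'Z' (0x41..0x5A) by flipping bit 0x20, which is set only in the
// lowercase range. Non-ASCII input is refused, because correct case folding
// there needs a locale-aware casing API.
static bool TryAsciiCaseInsensitiveEqual(unsigned charA, unsigned charB, bool* equal)
{
    if ((charA | charB) > 0x7F)
        return false;                                   // not ASCII: no answer

    if (charA >= 'a' && charA <= 'z') charA ^= 0x20;    // fold to upper case
    if (charB >= 'a' && charB <= 'z') charB ^= 0x20;

    *equal = (charA == charB);
    return true;
}

int main()
{
    bool eq = false;
    assert(TryAsciiCaseInsensitiveEqual('g', 'G', &eq) && eq);
    assert(TryAsciiCaseInsensitiveEqual('g', 'h', &eq) && !eq);
    assert(!TryAsciiCaseInsensitiveEqual(0xE9 /* e-acute */, 'E', &eq));
    return 0;
}
```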
1
dotnet/runtime
66,474
Update repo CMake configuration to target /W4 by default
Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
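To make the change concrete, the sketch below shows the kind of MSVC diagnostic that only appears once a build moves from a lower warning level to /W4, plus one per-warning suppression pattern. This is purely illustrative C++ written for this note, not code or build settings from the PR; the PR itself disables the failing warnings in the CMake configuration rather than with pragmas, and the function names here are made up.

```cpp
// Compile with: cl /W4 /c w4_example.cpp   (file name is hypothetical)
// C4100 ("unreferenced formal parameter") is a level-4 warning: it stays silent
// at /W3 and only shows up once the build is switched to /W4.

static int ReturnsConstant(int unusedParameter)      // emits C4100 under /W4
{
    return 42;
}

// Scoped suppression of a single warning number, one common way to keep a build
// green while individual warnings are cleaned up over time.
#pragma warning(push)
#pragma warning(disable : 4100)
static int ReturnsConstantSuppressed(int unusedParameter)
{
    return 42;
}
#pragma warning(pop)

int main()
{
    return ReturnsConstant(0) - ReturnsConstantSuppressed(0);
}
```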
AaronRobinsonMSFT
2022-03-10T20:20:31Z
2022-03-11T04:59:21Z
ba5207b0f4054fe8963fc3e87c2bd32907516961
5ce2b9f860f7a84c3059650bb67817d59d8f4953
Update repo CMake configuration to target /W4 by default. Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
./src/tests/Interop/COM/NativeClients/Dispatch/Client.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "ClientTests.h" #include <memory> #include <windows_version_helpers.h> void Validate_Numeric_In_ReturnByRef(); void Validate_Float_In_ReturnAndUpdateByRef(); void Validate_Double_In_ReturnAndUpdateByRef(); void Validate_LCID_Marshaled(); void Validate_Enumerator(); template<COINIT TM> struct ComInit { const HRESULT Result; ComInit() : Result{ ::CoInitializeEx(nullptr, TM) } { } ~ComInit() { if (SUCCEEDED(Result)) ::CoUninitialize(); } }; using ComMTA = ComInit<COINIT_MULTITHREADED>; int __cdecl main() { if (is_windows_nano() == S_OK) { ::puts("RegFree COM is not supported on Windows Nano. Auto-passing this test.\n"); return 100; } ComMTA init; if (FAILED(init.Result)) return -1; try { Validate_Numeric_In_ReturnByRef(); Validate_Float_In_ReturnAndUpdateByRef(); Validate_Double_In_ReturnAndUpdateByRef(); Validate_LCID_Marshaled(); Validate_Enumerator(); } catch (HRESULT hr) { ::printf("Test Failure: 0x%08x\n", hr); return 101; } return 100; } void Validate_Numeric_In_ReturnByRef() { HRESULT hr; CoreShimComActivation csact{ W("NETServer"), W("DispatchTesting") }; ComSmartPtr<IDispatchTesting> dispatchTesting; THROW_IF_FAILED(::CoCreateInstance(CLSID_DispatchTesting, nullptr, CLSCTX_INPROC, IID_IDispatchTesting, (void**)&dispatchTesting)); LPOLESTR numericMethodName = (LPOLESTR)W("DoubleNumeric_ReturnByRef"); LCID lcid = MAKELCID(LANG_USER_DEFAULT, SORT_DEFAULT); DISPID methodId; ::wprintf(W("Invoke %s\n"), numericMethodName); THROW_IF_FAILED(dispatchTesting->GetIDsOfNames( IID_NULL, &numericMethodName, 1, lcid, &methodId)); BYTE b1 = 24; BYTE b2; SHORT s1 = 53; SHORT s2; USHORT us1 = 74; USHORT us2; LONG i1 = 34; LONG i2; ULONG ui1 = 854; ULONG ui2; LONGLONG l1 = 894; LONGLONG l2; ULONGLONG ul1 = 4168; ULONGLONG ul2; { DISPPARAMS params; params.cArgs = 14; params.rgvarg = new VARIANTARG[params.cArgs]; params.cNamedArgs = 0; params.rgdispidNamedArgs = nullptr; V_VT(&params.rgvarg[13]) = VT_UI1; V_UI1(&params.rgvarg[13]) = b1; V_VT(&params.rgvarg[12]) = VT_BYREF | VT_UI1; V_UI1REF(&params.rgvarg[12]) = &b2; V_VT(&params.rgvarg[11]) = VT_I2; V_I2(&params.rgvarg[11]) = s1; V_VT(&params.rgvarg[10]) = VT_BYREF | VT_I2; V_I2REF(&params.rgvarg[10]) = &s2; V_VT(&params.rgvarg[9]) = VT_UI2; V_UI2(&params.rgvarg[9]) = us1; V_VT(&params.rgvarg[8]) = VT_BYREF | VT_UI2; V_UI2REF(&params.rgvarg[8]) = &us2; V_VT(&params.rgvarg[7]) = VT_I4; V_I4(&params.rgvarg[7]) = i1; V_VT(&params.rgvarg[6]) = VT_BYREF | VT_I4; V_I4REF(&params.rgvarg[6]) = &i2; V_VT(&params.rgvarg[5]) = VT_UI4; V_UI4(&params.rgvarg[5]) = ui1; V_VT(&params.rgvarg[4]) = VT_BYREF | VT_UI4; V_UI4REF(&params.rgvarg[4]) = &ui2; V_VT(&params.rgvarg[3]) = VT_I8; V_I8(&params.rgvarg[3]) = l1; V_VT(&params.rgvarg[2]) = VT_BYREF | VT_I8; V_I8REF(&params.rgvarg[2]) = &l2; V_VT(&params.rgvarg[1]) = VT_UI8; V_UI8(&params.rgvarg[1]) = ul1; V_VT(&params.rgvarg[0]) = VT_BYREF | VT_UI8; V_UI8REF(&params.rgvarg[0]) = &ul2; THROW_IF_FAILED(dispatchTesting->Invoke( methodId, IID_NULL, lcid, DISPATCH_METHOD, &params, nullptr, nullptr, nullptr )); THROW_FAIL_IF_FALSE(b2 == b1 * 2); THROW_FAIL_IF_FALSE(s2 == s1 * 2); THROW_FAIL_IF_FALSE(us2 == us1 * 2); THROW_FAIL_IF_FALSE(i2 == i1 * 2); THROW_FAIL_IF_FALSE(ui2 == ui1 * 2); THROW_FAIL_IF_FALSE(l2 == l1 * 2); THROW_FAIL_IF_FALSE(ul2 == ul1 * 2); } { b2 = 0; s2 = 0; us2 = 0; i2 = 0; ui2 = 0; l2 = 0; ul2 = 0; THROW_IF_FAILED(dispatchTesting->DoubleNumeric_ReturnByRef(b1, 
&b2, s1, &s2, us1, &us2, i1, (INT*)&i2, ui1, (UINT*)&ui2, l1, &l2, ul1, &ul2)); THROW_FAIL_IF_FALSE(b2 == b1 * 2); THROW_FAIL_IF_FALSE(s2 == s1 * 2); THROW_FAIL_IF_FALSE(us2 == us1 * 2); THROW_FAIL_IF_FALSE(i2 == i1 * 2); THROW_FAIL_IF_FALSE(ui2 == ui1 * 2); THROW_FAIL_IF_FALSE(l2 == l1 * 2); THROW_FAIL_IF_FALSE(ul2 == ul1 * 2); } } namespace { bool EqualByBound(float expected, float actual) { float low = expected - 0.0001f; float high = expected + 0.0001f; float eps = abs(expected - actual); return eps < std::numeric_limits<float>::epsilon() || (low < actual && actual < high); } bool EqualByBound(double expected, double actual) { double low = expected - 0.00001; double high = expected + 0.00001; double eps = abs(expected - actual); return eps < std::numeric_limits<double>::epsilon() || (low < actual && actual < high); } } void Validate_Float_In_ReturnAndUpdateByRef() { HRESULT hr; CoreShimComActivation csact{ W("NETServer"), W("DispatchTesting") }; ComSmartPtr<IDispatchTesting> dispatchTesting; THROW_IF_FAILED(::CoCreateInstance(CLSID_DispatchTesting, nullptr, CLSCTX_INPROC, IID_IDispatchTesting, (void**)&dispatchTesting)); LPOLESTR numericMethodName = (LPOLESTR)W("Add_Float_ReturnAndUpdateByRef"); LCID lcid = MAKELCID(LANG_USER_DEFAULT, SORT_DEFAULT); DISPID methodId; ::wprintf(W("Invoke %s\n"), numericMethodName); THROW_IF_FAILED(dispatchTesting->GetIDsOfNames( IID_NULL, &numericMethodName, 1, lcid, &methodId)); const float a = 12.34f; const float b_orig = 1.234f; const float expected = b_orig + a; float b = b_orig; { DISPPARAMS params; params.cArgs = 2; params.rgvarg = new VARIANTARG[params.cArgs]; params.cNamedArgs = 0; params.rgdispidNamedArgs = nullptr; VARIANT result; V_VT(&params.rgvarg[1]) = VT_R4; V_R4(&params.rgvarg[1]) = a; V_VT(&params.rgvarg[0]) = VT_BYREF | VT_R4; V_R4REF(&params.rgvarg[0]) = &b; THROW_IF_FAILED(dispatchTesting->Invoke( methodId, IID_NULL, lcid, DISPATCH_METHOD, &params, &result, nullptr, nullptr )); THROW_FAIL_IF_FALSE(EqualByBound(expected, V_R4(&result))); THROW_FAIL_IF_FALSE(EqualByBound(expected, b)); } { b = b_orig; float result; THROW_IF_FAILED(dispatchTesting->Add_Float_ReturnAndUpdateByRef(a, &b, &result)); THROW_FAIL_IF_FALSE(EqualByBound(expected, result)); THROW_FAIL_IF_FALSE(EqualByBound(expected, b)); } } void Validate_Double_In_ReturnAndUpdateByRef() { HRESULT hr; CoreShimComActivation csact{ W("NETServer"), W("DispatchTesting") }; ComSmartPtr<IDispatchTesting> dispatchTesting; THROW_IF_FAILED(::CoCreateInstance(CLSID_DispatchTesting, nullptr, CLSCTX_INPROC, IID_IDispatchTesting, (void**)&dispatchTesting)); LPOLESTR numericMethodName = (LPOLESTR)W("Add_Double_ReturnAndUpdateByRef"); LCID lcid = MAKELCID(LANG_USER_DEFAULT, SORT_DEFAULT); DISPID methodId; ::wprintf(W("Invoke %s\n"), numericMethodName); THROW_IF_FAILED(dispatchTesting->GetIDsOfNames( IID_NULL, &numericMethodName, 1, lcid, &methodId)); const double a = 1856.5634; const double b_orig = 587867.757; const double expected = a + b_orig; double b = b_orig; { DISPPARAMS params; params.cArgs = 2; params.rgvarg = new VARIANTARG[params.cArgs]; params.cNamedArgs = 0; params.rgdispidNamedArgs = nullptr; VARIANT result; V_VT(&params.rgvarg[1]) = VT_R8; V_R8(&params.rgvarg[1]) = a; V_VT(&params.rgvarg[0]) = VT_BYREF | VT_R8; V_R8REF(&params.rgvarg[0]) = &b; THROW_IF_FAILED(dispatchTesting->Invoke( methodId, IID_NULL, lcid, DISPATCH_METHOD, &params, &result, nullptr, nullptr )); THROW_FAIL_IF_FALSE(EqualByBound(expected, V_R8(&result))); THROW_FAIL_IF_FALSE(EqualByBound(expected, b)); } { b = 
b_orig; double result; THROW_IF_FAILED(dispatchTesting->Add_Double_ReturnAndUpdateByRef(a, &b, &result)); THROW_FAIL_IF_FALSE(EqualByBound(expected, result)); THROW_FAIL_IF_FALSE(EqualByBound(expected, b)); } } void Validate_LCID_Marshaled() { HRESULT hr; CoreShimComActivation csact{ W("NETServer"), W("DispatchTesting") }; ComSmartPtr<IDispatchTesting> dispatchTesting; THROW_IF_FAILED(::CoCreateInstance(CLSID_DispatchTesting, nullptr, CLSCTX_INPROC, IID_IDispatchTesting, (void**)&dispatchTesting)); LPOLESTR numericMethodName = (LPOLESTR)W("PassThroughLCID"); LCID lcid = MAKELCID(MAKELANGID(LANG_SPANISH, SUBLANG_SPANISH_CHILE), SORT_DEFAULT); DISPID methodId; ::wprintf(W("Invoke %s\n"), numericMethodName); THROW_IF_FAILED(dispatchTesting->GetIDsOfNames( IID_NULL, &numericMethodName, 1, lcid, &methodId)); DISPPARAMS params; params.cArgs = 0; params.rgvarg = nullptr; params.cNamedArgs = 0; params.rgdispidNamedArgs = nullptr; VARIANT result; THROW_IF_FAILED(dispatchTesting->Invoke( methodId, IID_NULL, lcid, DISPATCH_METHOD, &params, &result, nullptr, nullptr )); THROW_FAIL_IF_FALSE(lcid == V_I4(&result)); } namespace { void ValidateExpectedEnumVariant(IEnumVARIANT *enumVariant, int expectedStart, int expectedCount) { HRESULT hr; VARIANT element; ULONG numFetched; for(int i = expectedStart; i < expectedStart + expectedCount; ++i) { THROW_IF_FAILED(enumVariant->Next(1, &element, &numFetched)); THROW_FAIL_IF_FALSE(numFetched == 1); THROW_FAIL_IF_FALSE(V_I4(&element) == i) ::VariantClear(&element); } hr = enumVariant->Next(1, &element, &numFetched); THROW_FAIL_IF_FALSE(hr == S_FALSE && numFetched == 0); } void ValidateReturnedEnumerator(VARIANT *toValidate) { HRESULT hr; THROW_FAIL_IF_FALSE(V_VT(toValidate) == VT_UNKNOWN || V_VT(toValidate) == VT_DISPATCH); ComSmartPtr<IEnumVARIANT> enumVariant; THROW_IF_FAILED(V_UNKNOWN(toValidate)->QueryInterface<IEnumVARIANT>(&enumVariant)); // Implementation of IDispatchTesting should return [0,9] ValidateExpectedEnumVariant(enumVariant, 0, 10); THROW_IF_FAILED(enumVariant->Reset()); ValidateExpectedEnumVariant(enumVariant, 0, 10); THROW_IF_FAILED(enumVariant->Reset()); THROW_IF_FAILED(enumVariant->Skip(3)); ValidateExpectedEnumVariant(enumVariant, 3, 7); } } void Validate_Enumerator() { HRESULT hr; CoreShimComActivation csact{ W("NETServer"), W("DispatchTesting") }; ComSmartPtr<IDispatchTesting> dispatchTesting; THROW_IF_FAILED(::CoCreateInstance(CLSID_DispatchTesting, nullptr, CLSCTX_INPROC, IID_IDispatchTesting, (void**)&dispatchTesting)); LCID lcid = MAKELCID(LANG_USER_DEFAULT, SORT_DEFAULT); ::printf("Invoke GetEnumerator (DISPID_NEWENUM)\n"); DISPPARAMS params {}; VARIANT result; THROW_IF_FAILED(dispatchTesting->Invoke( DISPID_NEWENUM, IID_NULL, lcid, DISPATCH_METHOD, &params, &result, nullptr, nullptr )); ::printf(" -- Validate returned IEnumVARIANT\n"); ValidateReturnedEnumerator(&result); LPOLESTR methodName = (LPOLESTR)W("ExplicitGetEnumerator"); ::wprintf(W("Invoke %s\n"), methodName); DISPID methodId; THROW_IF_FAILED(dispatchTesting->GetIDsOfNames( IID_NULL, &methodName, 1, lcid, &methodId)); ::VariantClear(&result); THROW_IF_FAILED(dispatchTesting->Invoke( methodId, IID_NULL, lcid, DISPATCH_METHOD, &params, &result, nullptr, nullptr )); ::printf(" -- Validate returned IEnumVARIANT\n"); ValidateReturnedEnumerator(&result); }
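One convention in the DISPPARAMS packing above that is easy to trip over: IDispatch::Invoke expects positional arguments in rgvarg in reverse order, so the method's first parameter goes at index cArgs-1 and its last at index 0 (which is why b1, the first parameter, lands in rgvarg[13] of the 14-argument call while &ul2, the last parameter, sits at rgvarg[0]). A minimal hedged sketch of packing a (LONG in, LONG* out) pair that way; the helper name is invented and this is not code from the test:

```cpp
#include <windows.h>
#include <oleauto.h>   // VARIANTARG, DISPPARAMS, V_VT / V_I4 / V_I4REF macros

// Illustrative only: fill a DISPPARAMS for a method taking (LONG input, LONG* output).
// rgvarg is ordered right-to-left, so the *last* parameter occupies rgvarg[0]
// and the *first* parameter occupies rgvarg[cArgs - 1].
static void PackTwoArgs(DISPPARAMS* params, VARIANTARG storage[2], LONG input, LONG* output)
{
    params->cArgs = 2;
    params->rgvarg = storage;
    params->cNamedArgs = 0;
    params->rgdispidNamedArgs = nullptr;

    V_VT(&storage[1]) = VT_I4;              // first parameter -> highest index
    V_I4(&storage[1]) = input;

    V_VT(&storage[0]) = VT_BYREF | VT_I4;   // last (byref) parameter -> index 0
    V_I4REF(&storage[0]) = output;
}
```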
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "ClientTests.h" #include <memory> #include <windows_version_helpers.h> void Validate_Numeric_In_ReturnByRef(); void Validate_Float_In_ReturnAndUpdateByRef(); void Validate_Double_In_ReturnAndUpdateByRef(); void Validate_LCID_Marshaled(); void Validate_Enumerator(); template<COINIT TM> struct ComInit { const HRESULT Result; ComInit() : Result{ ::CoInitializeEx(nullptr, TM) } { } ~ComInit() { if (SUCCEEDED(Result)) ::CoUninitialize(); } }; using ComMTA = ComInit<COINIT_MULTITHREADED>; int __cdecl main() { if (is_windows_nano() == S_OK) { ::puts("RegFree COM is not supported on Windows Nano. Auto-passing this test.\n"); return 100; } ComMTA init; if (FAILED(init.Result)) return -1; try { Validate_Numeric_In_ReturnByRef(); Validate_Float_In_ReturnAndUpdateByRef(); Validate_Double_In_ReturnAndUpdateByRef(); Validate_LCID_Marshaled(); Validate_Enumerator(); } catch (HRESULT hr) { ::printf("Test Failure: 0x%08x\n", hr); return 101; } return 100; } void Validate_Numeric_In_ReturnByRef() { HRESULT hr; CoreShimComActivation csact{ W("NETServer"), W("DispatchTesting") }; ComSmartPtr<IDispatchTesting> dispatchTesting; THROW_IF_FAILED(::CoCreateInstance(CLSID_DispatchTesting, nullptr, CLSCTX_INPROC, IID_IDispatchTesting, (void**)&dispatchTesting)); LPOLESTR numericMethodName = (LPOLESTR)W("DoubleNumeric_ReturnByRef"); LCID lcid = MAKELCID(LANG_USER_DEFAULT, SORT_DEFAULT); DISPID methodId; ::wprintf(W("Invoke %s\n"), numericMethodName); THROW_IF_FAILED(dispatchTesting->GetIDsOfNames( IID_NULL, &numericMethodName, 1, lcid, &methodId)); BYTE b1 = 24; BYTE b2; SHORT s1 = 53; SHORT s2; USHORT us1 = 74; USHORT us2; LONG i1 = 34; LONG i2; ULONG ui1 = 854; ULONG ui2; LONGLONG l1 = 894; LONGLONG l2; ULONGLONG ul1 = 4168; ULONGLONG ul2; { DISPPARAMS params; params.cArgs = 14; params.rgvarg = new VARIANTARG[params.cArgs]; params.cNamedArgs = 0; params.rgdispidNamedArgs = nullptr; V_VT(&params.rgvarg[13]) = VT_UI1; V_UI1(&params.rgvarg[13]) = b1; V_VT(&params.rgvarg[12]) = VT_BYREF | VT_UI1; V_UI1REF(&params.rgvarg[12]) = &b2; V_VT(&params.rgvarg[11]) = VT_I2; V_I2(&params.rgvarg[11]) = s1; V_VT(&params.rgvarg[10]) = VT_BYREF | VT_I2; V_I2REF(&params.rgvarg[10]) = &s2; V_VT(&params.rgvarg[9]) = VT_UI2; V_UI2(&params.rgvarg[9]) = us1; V_VT(&params.rgvarg[8]) = VT_BYREF | VT_UI2; V_UI2REF(&params.rgvarg[8]) = &us2; V_VT(&params.rgvarg[7]) = VT_I4; V_I4(&params.rgvarg[7]) = i1; V_VT(&params.rgvarg[6]) = VT_BYREF | VT_I4; V_I4REF(&params.rgvarg[6]) = &i2; V_VT(&params.rgvarg[5]) = VT_UI4; V_UI4(&params.rgvarg[5]) = ui1; V_VT(&params.rgvarg[4]) = VT_BYREF | VT_UI4; V_UI4REF(&params.rgvarg[4]) = &ui2; V_VT(&params.rgvarg[3]) = VT_I8; V_I8(&params.rgvarg[3]) = l1; V_VT(&params.rgvarg[2]) = VT_BYREF | VT_I8; V_I8REF(&params.rgvarg[2]) = &l2; V_VT(&params.rgvarg[1]) = VT_UI8; V_UI8(&params.rgvarg[1]) = ul1; V_VT(&params.rgvarg[0]) = VT_BYREF | VT_UI8; V_UI8REF(&params.rgvarg[0]) = &ul2; THROW_IF_FAILED(dispatchTesting->Invoke( methodId, IID_NULL, lcid, DISPATCH_METHOD, &params, nullptr, nullptr, nullptr )); THROW_FAIL_IF_FALSE(b2 == b1 * 2); THROW_FAIL_IF_FALSE(s2 == s1 * 2); THROW_FAIL_IF_FALSE(us2 == us1 * 2); THROW_FAIL_IF_FALSE(i2 == i1 * 2); THROW_FAIL_IF_FALSE(ui2 == ui1 * 2); THROW_FAIL_IF_FALSE(l2 == l1 * 2); THROW_FAIL_IF_FALSE(ul2 == ul1 * 2); } { b2 = 0; s2 = 0; us2 = 0; i2 = 0; ui2 = 0; l2 = 0; ul2 = 0; THROW_IF_FAILED(dispatchTesting->DoubleNumeric_ReturnByRef(b1, 
&b2, s1, &s2, us1, &us2, i1, (INT*)&i2, ui1, (UINT*)&ui2, l1, &l2, ul1, &ul2)); THROW_FAIL_IF_FALSE(b2 == b1 * 2); THROW_FAIL_IF_FALSE(s2 == s1 * 2); THROW_FAIL_IF_FALSE(us2 == us1 * 2); THROW_FAIL_IF_FALSE(i2 == i1 * 2); THROW_FAIL_IF_FALSE(ui2 == ui1 * 2); THROW_FAIL_IF_FALSE(l2 == l1 * 2); THROW_FAIL_IF_FALSE(ul2 == ul1 * 2); } } namespace { bool EqualByBound(float expected, float actual) { float low = expected - 0.0001f; float high = expected + 0.0001f; float eps = abs(expected - actual); return eps < std::numeric_limits<float>::epsilon() || (low < actual && actual < high); } bool EqualByBound(double expected, double actual) { double low = expected - 0.00001; double high = expected + 0.00001; double eps = abs(expected - actual); return eps < std::numeric_limits<double>::epsilon() || (low < actual && actual < high); } } void Validate_Float_In_ReturnAndUpdateByRef() { HRESULT hr; CoreShimComActivation csact{ W("NETServer"), W("DispatchTesting") }; ComSmartPtr<IDispatchTesting> dispatchTesting; THROW_IF_FAILED(::CoCreateInstance(CLSID_DispatchTesting, nullptr, CLSCTX_INPROC, IID_IDispatchTesting, (void**)&dispatchTesting)); LPOLESTR numericMethodName = (LPOLESTR)W("Add_Float_ReturnAndUpdateByRef"); LCID lcid = MAKELCID(LANG_USER_DEFAULT, SORT_DEFAULT); DISPID methodId; ::wprintf(W("Invoke %s\n"), numericMethodName); THROW_IF_FAILED(dispatchTesting->GetIDsOfNames( IID_NULL, &numericMethodName, 1, lcid, &methodId)); const float a = 12.34f; const float b_orig = 1.234f; const float expected = b_orig + a; float b = b_orig; { DISPPARAMS params; params.cArgs = 2; params.rgvarg = new VARIANTARG[params.cArgs]; params.cNamedArgs = 0; params.rgdispidNamedArgs = nullptr; VARIANT result; V_VT(&params.rgvarg[1]) = VT_R4; V_R4(&params.rgvarg[1]) = a; V_VT(&params.rgvarg[0]) = VT_BYREF | VT_R4; V_R4REF(&params.rgvarg[0]) = &b; THROW_IF_FAILED(dispatchTesting->Invoke( methodId, IID_NULL, lcid, DISPATCH_METHOD, &params, &result, nullptr, nullptr )); THROW_FAIL_IF_FALSE(EqualByBound(expected, V_R4(&result))); THROW_FAIL_IF_FALSE(EqualByBound(expected, b)); } { b = b_orig; float result; THROW_IF_FAILED(dispatchTesting->Add_Float_ReturnAndUpdateByRef(a, &b, &result)); THROW_FAIL_IF_FALSE(EqualByBound(expected, result)); THROW_FAIL_IF_FALSE(EqualByBound(expected, b)); } } void Validate_Double_In_ReturnAndUpdateByRef() { HRESULT hr; CoreShimComActivation csact{ W("NETServer"), W("DispatchTesting") }; ComSmartPtr<IDispatchTesting> dispatchTesting; THROW_IF_FAILED(::CoCreateInstance(CLSID_DispatchTesting, nullptr, CLSCTX_INPROC, IID_IDispatchTesting, (void**)&dispatchTesting)); LPOLESTR numericMethodName = (LPOLESTR)W("Add_Double_ReturnAndUpdateByRef"); LCID lcid = MAKELCID(LANG_USER_DEFAULT, SORT_DEFAULT); DISPID methodId; ::wprintf(W("Invoke %s\n"), numericMethodName); THROW_IF_FAILED(dispatchTesting->GetIDsOfNames( IID_NULL, &numericMethodName, 1, lcid, &methodId)); const double a = 1856.5634; const double b_orig = 587867.757; const double expected = a + b_orig; double b = b_orig; { DISPPARAMS params; params.cArgs = 2; params.rgvarg = new VARIANTARG[params.cArgs]; params.cNamedArgs = 0; params.rgdispidNamedArgs = nullptr; VARIANT result; V_VT(&params.rgvarg[1]) = VT_R8; V_R8(&params.rgvarg[1]) = a; V_VT(&params.rgvarg[0]) = VT_BYREF | VT_R8; V_R8REF(&params.rgvarg[0]) = &b; THROW_IF_FAILED(dispatchTesting->Invoke( methodId, IID_NULL, lcid, DISPATCH_METHOD, &params, &result, nullptr, nullptr )); THROW_FAIL_IF_FALSE(EqualByBound(expected, V_R8(&result))); THROW_FAIL_IF_FALSE(EqualByBound(expected, b)); } { b = 
b_orig; double result; THROW_IF_FAILED(dispatchTesting->Add_Double_ReturnAndUpdateByRef(a, &b, &result)); THROW_FAIL_IF_FALSE(EqualByBound(expected, result)); THROW_FAIL_IF_FALSE(EqualByBound(expected, b)); } } void Validate_LCID_Marshaled() { HRESULT hr; CoreShimComActivation csact{ W("NETServer"), W("DispatchTesting") }; ComSmartPtr<IDispatchTesting> dispatchTesting; THROW_IF_FAILED(::CoCreateInstance(CLSID_DispatchTesting, nullptr, CLSCTX_INPROC, IID_IDispatchTesting, (void**)&dispatchTesting)); LPOLESTR numericMethodName = (LPOLESTR)W("PassThroughLCID"); LCID lcid = MAKELCID(MAKELANGID(LANG_SPANISH, SUBLANG_SPANISH_CHILE), SORT_DEFAULT); DISPID methodId; ::wprintf(W("Invoke %s\n"), numericMethodName); THROW_IF_FAILED(dispatchTesting->GetIDsOfNames( IID_NULL, &numericMethodName, 1, lcid, &methodId)); DISPPARAMS params; params.cArgs = 0; params.rgvarg = nullptr; params.cNamedArgs = 0; params.rgdispidNamedArgs = nullptr; VARIANT result; THROW_IF_FAILED(dispatchTesting->Invoke( methodId, IID_NULL, lcid, DISPATCH_METHOD, &params, &result, nullptr, nullptr )); THROW_FAIL_IF_FALSE(lcid == (LCID)V_UI4(&result)); } namespace { void ValidateExpectedEnumVariant(IEnumVARIANT *enumVariant, int expectedStart, int expectedCount) { HRESULT hr; VARIANT element; ULONG numFetched; for(int i = expectedStart; i < expectedStart + expectedCount; ++i) { THROW_IF_FAILED(enumVariant->Next(1, &element, &numFetched)); THROW_FAIL_IF_FALSE(numFetched == 1); THROW_FAIL_IF_FALSE(V_I4(&element) == i) ::VariantClear(&element); } hr = enumVariant->Next(1, &element, &numFetched); THROW_FAIL_IF_FALSE(hr == S_FALSE && numFetched == 0); } void ValidateReturnedEnumerator(VARIANT *toValidate) { HRESULT hr; THROW_FAIL_IF_FALSE(V_VT(toValidate) == VT_UNKNOWN || V_VT(toValidate) == VT_DISPATCH); ComSmartPtr<IEnumVARIANT> enumVariant; THROW_IF_FAILED(V_UNKNOWN(toValidate)->QueryInterface<IEnumVARIANT>(&enumVariant)); // Implementation of IDispatchTesting should return [0,9] ValidateExpectedEnumVariant(enumVariant, 0, 10); THROW_IF_FAILED(enumVariant->Reset()); ValidateExpectedEnumVariant(enumVariant, 0, 10); THROW_IF_FAILED(enumVariant->Reset()); THROW_IF_FAILED(enumVariant->Skip(3)); ValidateExpectedEnumVariant(enumVariant, 3, 7); } } void Validate_Enumerator() { HRESULT hr; CoreShimComActivation csact{ W("NETServer"), W("DispatchTesting") }; ComSmartPtr<IDispatchTesting> dispatchTesting; THROW_IF_FAILED(::CoCreateInstance(CLSID_DispatchTesting, nullptr, CLSCTX_INPROC, IID_IDispatchTesting, (void**)&dispatchTesting)); LCID lcid = MAKELCID(LANG_USER_DEFAULT, SORT_DEFAULT); ::printf("Invoke GetEnumerator (DISPID_NEWENUM)\n"); DISPPARAMS params {}; VARIANT result; THROW_IF_FAILED(dispatchTesting->Invoke( DISPID_NEWENUM, IID_NULL, lcid, DISPATCH_METHOD, &params, &result, nullptr, nullptr )); ::printf(" -- Validate returned IEnumVARIANT\n"); ValidateReturnedEnumerator(&result); LPOLESTR methodName = (LPOLESTR)W("ExplicitGetEnumerator"); ::wprintf(W("Invoke %s\n"), methodName); DISPID methodId; THROW_IF_FAILED(dispatchTesting->GetIDsOfNames( IID_NULL, &methodName, 1, lcid, &methodId)); ::VariantClear(&result); THROW_IF_FAILED(dispatchTesting->Invoke( methodId, IID_NULL, lcid, DISPATCH_METHOD, &params, &result, nullptr, nullptr )); ::printf(" -- Validate returned IEnumVARIANT\n"); ValidateReturnedEnumerator(&result); }
1
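Aside on the row above: the only difference between its before_content and after_content blobs is how Validate_LCID_Marshaled reads the LCID back out of the Invoke result (V_I4 versus a cast of V_UI4). Purely as a hedged illustration, not part of the test itself, a type-checked read could look like the following; the helper name TryGetLcidFromVariant is invented here.

#include <windows.h>
#include <oaidl.h>

// Illustrative helper: read a 32-bit LCID out of an Invoke result whether the
// server marshaled it as VT_I4 or VT_UI4.
bool TryGetLcidFromVariant(const VARIANT &v, LCID *lcid)
{
    switch (V_VT(&v))
    {
    case VT_I4:  *lcid = (LCID)V_I4(&v); return true;  // signed 32-bit payload
    case VT_UI4: *lcid = V_UI4(&v);      return true;  // unsigned 32-bit payload
    default:     return false;                         // unexpected variant type
    }
}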
dotnet/runtime
66,474
Update repo CMake configuration to target /W4 by default
Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
AaronRobinsonMSFT
2022-03-10T20:20:31Z
2022-03-11T04:59:21Z
ba5207b0f4054fe8963fc3e87c2bd32907516961
5ce2b9f860f7a84c3059650bb67817d59d8f4953
Update repo CMake configuration to target /W4 by default. Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
./src/tests/Interop/MonoAPI/Native/mono-embedding-api-test/CMakeLists.txt
cmake_minimum_required(VERSION 3.13.0) project (MonoEmbeddingApiTest) include_directories(${INC_PLATFORM_DIR}) add_subdirectory(${CLR_SRC_NATIVE_DIR}/public public_api) # add the library add_library (mono-embedding-api-test SHARED mono-embedding-api-test.c api-types.h api-functions.h) target_link_libraries(mono-embedding-api-test monoapi ${LINK_LIBRARIES_ADDITIONAL}) if(CLR_CMAKE_HOST_OSX) target_compile_definitions(mono-embedding-api-test PRIVATE -DHOST_DARWIN) elseif(CLR_CMAKE_HOST_WIN32) target_compile_definitions(mono-embedding-api-test PRIVATE -DHOST_WIN32 -D_CRT_SECURE_NO_WARNINGS) else() target_compile_definitions(mono-embedding-api-test PRIVATE -DHOST_LINUX) endif() # add the install targets install (TARGETS mono-embedding-api-test DESTINATION bin)
cmake_minimum_required(VERSION 3.13.0) project (MonoEmbeddingApiTest) include_directories(${INC_PLATFORM_DIR}) add_subdirectory(${CLR_SRC_NATIVE_DIR}/public public_api) # add the library add_library (mono-embedding-api-test SHARED mono-embedding-api-test.c api-types.h api-functions.h) set_target_properties(mono-embedding-api-test PROPERTIES MSVC_WARNING_LEVEL 3) target_link_libraries(mono-embedding-api-test monoapi ${LINK_LIBRARIES_ADDITIONAL}) if(CLR_CMAKE_HOST_OSX) target_compile_definitions(mono-embedding-api-test PRIVATE -DHOST_DARWIN) elseif(CLR_CMAKE_HOST_WIN32) target_compile_definitions(mono-embedding-api-test PRIVATE -DHOST_WIN32 -D_CRT_SECURE_NO_WARNINGS) else() target_compile_definitions(mono-embedding-api-test PRIVATE -DHOST_LINUX) endif() # add the install targets install (TARGETS mono-embedding-api-test DESTINATION bin)
1
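Aside on the row above: the CMake diff keeps one Mono test library at warning level 3 while the repo default moves to /W4, and the PR description says failing warnings are disabled and will be re-enabled gradually. Purely as an illustration of the kind of code-level escape hatch such a migration sometimes uses (this snippet is not taken from the PR), an MSVC-only pragma suppression looks like this; the warning number C4100 is an arbitrary example.

// Hypothetical example: silence one MSVC warning (C4100, unreferenced formal
// parameter) around code that has not been cleaned up for /W4 yet.
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4100)
#endif

static int DoubleValue(void *context, int value)   // 'context' intentionally unused
{
    return value * 2;
}

#ifdef _MSC_VER
#pragma warning(pop)
#endif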
dotnet/runtime
66,474
Update repo CMake configuration to target /W4 by default
Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
AaronRobinsonMSFT
2022-03-10T20:20:31Z
2022-03-11T04:59:21Z
ba5207b0f4054fe8963fc3e87c2bd32907516961
5ce2b9f860f7a84c3059650bb67817d59d8f4953
Update repo CMake configuration to target /W4 by default. Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
./src/coreclr/pal/tests/palsuite/c_runtime/fputs/test1/test1.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: test1.c ** ** Purpose: Call fputs twice and write two strings to a file. Then ** call fread on the file and check that the data which was written is what ** we expect it to be. ** ** **===================================================================*/ #include <palsuite.h> PALTEST(c_runtime_fputs_test1_paltest_fputs_test1, "c_runtime/fputs/test1/paltest_fputs_test1") { FILE* TheFile; char* StringOne = "FooBar"; char* StringTwo = "BarFoo"; char* CompleteString = "FooBarBarFoo"; char ReadBuffer[64]; int ret; if (PAL_Initialize(argc, argv)) { return FAIL; } /* Open the file that we'll be working with */ TheFile = fopen("TestFile", "w+"); if(TheFile == NULL) { Fail("ERROR: fopen failed to open the file 'TestFile' in read/write " "mode.\n"); } /* Call fputs twice to write two strings to the file stream */ if(fputs(StringOne, TheFile) < 0) { Fail("ERROR: fputs returned a negative value when attempting to " "put the string '%s' to the file.\n",StringOne); } if(fputs(StringTwo, TheFile) < 0) { Fail("ERROR: fputs returned a negative value when attempting to " "put the string '%s' to the file.\n",StringTwo); } /* Flush the buffers */ if(fflush(TheFile) != 0) { Fail("ERROR: fflush failed to properly flush the buffers.\n"); } /* Now read from the file to ensure the data was written correctly. Note: We read more than what was written to make sure nothing extra was written. */ if(fseek(TheFile, 0, SEEK_SET) != 0) { Fail("ERROR: fseek failed to set the file pointer back to the start " "of the file.\n"); } if((ret = fread(ReadBuffer, 1, 20, TheFile)) != 12) { Fail("ERROR: fread should have returned that it read in 12 characters " "from the file, but instead it returned %d.\n", ret); } ReadBuffer[ret] = '\0'; if(strcmp(ReadBuffer, CompleteString) != 0) { Fail("ERROR: The data read back from the file is not exactly the same " "as the data that was written by fputs. The file contains '%s' " "instead of '%s'.\n",ReadBuffer, CompleteString); } if(fclose(TheFile) != 0) { Fail("ERROR: fclose failed to close the file stream.\n"); } PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: test1.c ** ** Purpose: Call fputs twice and write two strings to a file. Then ** call fread on the file and check that the data which was written is what ** we expect it to be. ** ** **===================================================================*/ #include <palsuite.h> PALTEST(c_runtime_fputs_test1_paltest_fputs_test1, "c_runtime/fputs/test1/paltest_fputs_test1") { FILE* TheFile; char* StringOne = "FooBar"; char* StringTwo = "BarFoo"; char* CompleteString = "FooBarBarFoo"; char ReadBuffer[64]; int ret; if (PAL_Initialize(argc, argv)) { return FAIL; } /* Open the file that we'll be working with */ TheFile = fopen("TestFile", "w+"); if(TheFile == NULL) { Fail("ERROR: fopen failed to open the file 'TestFile' in read/write " "mode.\n"); } /* Call fputs twice to write two strings to the file stream */ if(fputs(StringOne, TheFile) < 0) { Fail("ERROR: fputs returned a negative value when attempting to " "put the string '%s' to the file.\n",StringOne); } if(fputs(StringTwo, TheFile) < 0) { Fail("ERROR: fputs returned a negative value when attempting to " "put the string '%s' to the file.\n",StringTwo); } /* Flush the buffers */ if(fflush(TheFile) != 0) { Fail("ERROR: fflush failed to properly flush the buffers.\n"); } /* Now read from the file to ensure the data was written correctly. Note: We read more than what was written to make sure nothing extra was written. */ if(fseek(TheFile, 0, SEEK_SET) != 0) { Fail("ERROR: fseek failed to set the file pointer back to the start " "of the file.\n"); } if((ret = fread(ReadBuffer, 1, 20, TheFile)) != 12) { Fail("ERROR: fread should have returned that it read in 12 characters " "from the file, but instead it returned %d.\n", ret); } ReadBuffer[ret] = '\0'; if(strcmp(ReadBuffer, CompleteString) != 0) { Fail("ERROR: The data read back from the file is not exactly the same " "as the data that was written by fputs. The file contains '%s' " "instead of '%s'.\n",ReadBuffer, CompleteString); } if(fclose(TheFile) != 0) { Fail("ERROR: fclose failed to close the file stream.\n"); } PAL_Terminate(); return PASS; }
-1
dotnet/runtime
66,474
Update repo CMake configuration to target /W4 by default
Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
AaronRobinsonMSFT
2022-03-10T20:20:31Z
2022-03-11T04:59:21Z
ba5207b0f4054fe8963fc3e87c2bd32907516961
5ce2b9f860f7a84c3059650bb67817d59d8f4953
Update repo CMake configuration to target /W4 by default. Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
./src/coreclr/vm/nativeimage.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // -------------------------------------------------------------------------------- // NativeImage.cpp // // -------------------------------------------------------------------------------- #include "common.h" #include "nativeimage.h" // -------------------------------------------------------------------------------- // Headers // -------------------------------------------------------------------------------- #include <shlwapi.h> BOOL AssemblyNameIndexHashTraits::Equals(LPCUTF8 a, LPCUTF8 b) { WRAPPER_NO_CONTRACT; return SString(SString::Utf8Literal, a).CompareCaseInsensitive(SString(SString::Utf8Literal, b)) == 0; } AssemblyNameIndexHashTraits::count_t AssemblyNameIndexHashTraits::Hash(LPCUTF8 s) { WRAPPER_NO_CONTRACT; return SString(SString::Utf8Literal, s).HashCaseInsensitive(); } BOOL NativeImageIndexTraits::Equals(LPCUTF8 a, LPCUTF8 b) { WRAPPER_NO_CONTRACT; return SString(SString::Utf8Literal, a).CompareCaseInsensitive(SString(SString::Utf8Literal, b)) == 0; } NativeImageIndexTraits::count_t NativeImageIndexTraits::Hash(LPCUTF8 a) { WRAPPER_NO_CONTRACT; return SString(SString::Utf8Literal, a).HashCaseInsensitive(); } NativeImage::NativeImage(AssemblyBinder *pAssemblyBinder, PEImageLayout *pImageLayout, LPCUTF8 imageFileName) : m_eagerFixupsLock(CrstNativeImageEagerFixups) { CONTRACTL { THROWS; CONSTRUCTOR_CHECK; STANDARD_VM_CHECK; INJECT_FAULT(COMPlusThrowOM();); } CONTRACTL_END; m_pAssemblyBinder = pAssemblyBinder; m_pImageLayout = pImageLayout; m_fileName = imageFileName; m_eagerFixupsHaveRun = false; m_readyToRunCodeDisabled = false; } void NativeImage::Initialize(READYTORUN_HEADER *pHeader, LoaderAllocator *pLoaderAllocator, AllocMemTracker *pamTracker) { LoaderHeap *pHeap = pLoaderAllocator->GetHighFrequencyHeap(); m_pReadyToRunInfo = new ReadyToRunInfo(/*pModule*/ NULL, pLoaderAllocator, m_pImageLayout, pHeader, /*compositeImage*/ NULL, pamTracker); m_pComponentAssemblies = m_pReadyToRunInfo->FindSection(ReadyToRunSectionType::ComponentAssemblies); m_pComponentAssemblyMvids = m_pReadyToRunInfo->FindSection(ReadyToRunSectionType::ManifestAssemblyMvids); m_componentAssemblyCount = m_pComponentAssemblies->Size / sizeof(READYTORUN_COMPONENT_ASSEMBLIES_ENTRY); // Check if the current module's image has native manifest metadata, otherwise the current->GetNativeAssemblyImport() asserts. m_pManifestMetadata = LoadManifestMetadata(); HENUMInternal assemblyEnum; HRESULT hr = m_pManifestMetadata->EnumAllInit(mdtAssemblyRef, &assemblyEnum); mdAssemblyRef assemblyRef; m_manifestAssemblyCount = 0; while (m_pManifestMetadata->EnumNext(&assemblyEnum, &assemblyRef)) { LPCSTR assemblyName; hr = m_pManifestMetadata->GetAssemblyRefProps(assemblyRef, NULL, NULL, &assemblyName, NULL, NULL, NULL, NULL); m_assemblySimpleNameToIndexMap.Add(AssemblyNameIndex(assemblyName, m_manifestAssemblyCount)); m_manifestAssemblyCount++; } // When a composite image contributes to a larger version bubble, its manifest assembly // count may exceed its component assembly count as it may contain references to // assemblies outside of the composite image that are part of its version bubble. 
_ASSERTE(m_manifestAssemblyCount >= m_componentAssemblyCount); S_SIZE_T dwAllocSize = S_SIZE_T(sizeof(PTR_Assembly)) * S_SIZE_T(m_manifestAssemblyCount); // Note: Memory allocated on loader heap is zero filled m_pNativeMetadataAssemblyRefMap = (PTR_Assembly*)pamTracker->Track(pLoaderAllocator->GetLowFrequencyHeap()->AllocMem(dwAllocSize)); } NativeImage::~NativeImage() { STANDARD_VM_CONTRACT; delete m_pReadyToRunInfo; delete m_pImageLayout; if (m_pManifestMetadata != NULL) { m_pManifestMetadata->Release(); } } #ifndef DACCESS_COMPILE NativeImage *NativeImage::Open( Module *componentModule, LPCUTF8 nativeImageFileName, AssemblyBinder *pAssemblyBinder, LoaderAllocator *pLoaderAllocator, /* out */ bool *isNewNativeImage) { STANDARD_VM_CONTRACT; NativeImage *pExistingImage = AppDomain::GetCurrentDomain()->GetNativeImage(nativeImageFileName); if (pExistingImage != nullptr) { *isNewNativeImage = false; if (pExistingImage->GetAssemblyBinder() == pAssemblyBinder) { pExistingImage->AddComponentAssemblyToCache(componentModule->GetAssembly()); return pExistingImage; } else { return nullptr; } } SString path = componentModule->GetPath(); SString::Iterator lastPathSeparatorIter = path.End(); size_t pathDirLength = 0; if (PEAssembly::FindLastPathSeparator(path, lastPathSeparatorIter)) { pathDirLength = (lastPathSeparatorIter - path.Begin()) + 1; } SString compositeImageFileName(SString::Utf8, nativeImageFileName); SString fullPath; fullPath.Set(path, path.Begin(), (COUNT_T)pathDirLength); fullPath += compositeImageFileName; LPWSTR searchPathsConfig; IfFailThrow(CLRConfig::GetConfigValue(CLRConfig::INTERNAL_NativeImageSearchPaths, &searchPathsConfig)); PEImageLayoutHolder peLoadedImage; BundleFileLocation bundleFileLocation = Bundle::ProbeAppBundle(fullPath, /*pathIsBundleRelative */ true); if (bundleFileLocation.IsValid()) { // No need to use cache for this PE image. // Composite r2r PE image is not a part of anyone's identity. // We only need it to obtain the native image, which will be cached at AppDomain level. PEImageHolder pImage = PEImage::OpenImage(fullPath, MDInternalImport_NoCache, bundleFileLocation); PEImageLayout* loaded = pImage->GetOrCreateLayout(PEImageLayout::LAYOUT_LOADED); // We will let pImage instance be freed after exiting this scope, but we will keep the layout, // thus the layout needs an AddRef, or it will be gone together with pImage. loaded->AddRef(); peLoadedImage = loaded; } if (peLoadedImage.IsNull()) { EX_TRY { peLoadedImage = PEImageLayout::LoadNative(fullPath); } EX_CATCH { SString searchPaths(searchPathsConfig); SString::CIterator start = searchPaths.Begin(); while (start != searchPaths.End()) { SString::CIterator end = start; if (!searchPaths.Find(end, PATH_SEPARATOR_CHAR_W)) { end = searchPaths.End(); } fullPath.Set(searchPaths, start, (COUNT_T)(end - start)); if (end != searchPaths.End()) { // Skip path separator character ++end; } start = end; if (fullPath.GetCount() == 0) { continue; } fullPath.Append(DIRECTORY_SEPARATOR_CHAR_W); fullPath += compositeImageFileName; EX_TRY { peLoadedImage = PEImageLayout::LoadNative(fullPath); break; } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions) } } EX_END_CATCH(SwallowAllExceptions) if (peLoadedImage.IsNull()) { // Failed to locate the native composite R2R image LOG((LF_LOADER, LL_ALWAYS, "LOADER: failed to load native image '%s' for component assembly '%S' using search paths: '%S'\n", nativeImageFileName, path.GetUnicode(), searchPathsConfig != nullptr ? 
searchPathsConfig : W("<use COMPlus_NativeImageSearchPaths to set>"))); RaiseFailFastException(nullptr, nullptr, 0); } } READYTORUN_HEADER *pHeader = (READYTORUN_HEADER *)peLoadedImage->GetExport("RTR_HEADER"); if (pHeader == NULL) { COMPlusThrowHR(COR_E_BADIMAGEFORMAT); } if (pHeader->Signature != READYTORUN_SIGNATURE) { COMPlusThrowHR(COR_E_BADIMAGEFORMAT); } if (pHeader->MajorVersion < MINIMUM_READYTORUN_MAJOR_VERSION || pHeader->MajorVersion > READYTORUN_MAJOR_VERSION) { COMPlusThrowHR(COR_E_BADIMAGEFORMAT); } NewHolder<NativeImage> image = new NativeImage(pAssemblyBinder, peLoadedImage.Extract(), nativeImageFileName); AllocMemTracker amTracker; image->Initialize(pHeader, pLoaderAllocator, &amTracker); pExistingImage = AppDomain::GetCurrentDomain()->SetNativeImage(nativeImageFileName, image); if (pExistingImage == nullptr) { // No pre-existing image, new image has been stored in the map *isNewNativeImage = true; amTracker.SuppressRelease(); image->AddComponentAssemblyToCache(componentModule->GetAssembly()); return image.Extract(); } // Return pre-existing image if it was loaded into the same ALC, null otherwise *isNewNativeImage = false; if (pExistingImage->GetAssemblyBinder() == pAssemblyBinder) { pExistingImage->AddComponentAssemblyToCache(componentModule->GetAssembly()); return pExistingImage; } else { return nullptr; } } #endif #ifndef DACCESS_COMPILE void NativeImage::AddComponentAssemblyToCache(Assembly *assembly) { STANDARD_VM_CONTRACT; const AssemblyNameIndex *assemblyNameIndex = m_assemblySimpleNameToIndexMap.LookupPtr(assembly->GetSimpleName()); if (assemblyNameIndex != nullptr) { VolatileStore(&m_pNativeMetadataAssemblyRefMap[assemblyNameIndex->Index], assembly); } } #endif #ifndef DACCESS_COMPILE Assembly *NativeImage::LoadManifestAssembly(uint32_t rowid, DomainAssembly *pParentAssembly) { STANDARD_VM_CONTRACT; AssemblySpec spec; spec.InitializeSpec(TokenFromRid(rowid, mdtAssemblyRef), m_pManifestMetadata, pParentAssembly); return spec.LoadAssembly(FILE_LOADED); } #endif #ifndef DACCESS_COMPILE PTR_READYTORUN_CORE_HEADER NativeImage::GetComponentAssemblyHeader(LPCUTF8 simpleName) { STANDARD_VM_CONTRACT; const AssemblyNameIndex *assemblyNameIndex = m_assemblySimpleNameToIndexMap.LookupPtr(simpleName); if (assemblyNameIndex != NULL) { const BYTE *pImageBase = (const BYTE *)m_pImageLayout->GetBase(); const READYTORUN_COMPONENT_ASSEMBLIES_ENTRY *componentAssembly = (const READYTORUN_COMPONENT_ASSEMBLIES_ENTRY *)&pImageBase[m_pComponentAssemblies->VirtualAddress] + assemblyNameIndex->Index; return (PTR_READYTORUN_CORE_HEADER)&pImageBase[componentAssembly->ReadyToRunCoreHeader.VirtualAddress]; } return NULL; } #endif #ifndef DACCESS_COMPILE void NativeImage::CheckAssemblyMvid(Assembly *assembly) const { STANDARD_VM_CONTRACT; if (m_pComponentAssemblyMvids == NULL) { return; } const AssemblyNameIndex *assemblyNameIndex = m_assemblySimpleNameToIndexMap.LookupPtr(assembly->GetSimpleName()); if (assemblyNameIndex == NULL) { return; } GUID assemblyMvid; assembly->GetMDImport()->GetScopeProps(NULL, &assemblyMvid); const byte *pImageBase = (const BYTE *)m_pImageLayout->GetBase(); const GUID *componentMvid = (const GUID *)&pImageBase[m_pComponentAssemblyMvids->VirtualAddress] + assemblyNameIndex->Index; if (IsEqualGUID(*componentMvid, assemblyMvid)) { return; } static const size_t MVID_TEXT_LENGTH = 39; WCHAR assemblyMvidText[MVID_TEXT_LENGTH]; StringFromGUID2(assemblyMvid, assemblyMvidText, MVID_TEXT_LENGTH); WCHAR componentMvidText[MVID_TEXT_LENGTH]; StringFromGUID2(*componentMvid, 
componentMvidText, MVID_TEXT_LENGTH); SString message; message.Printf(W("MVID mismatch between loaded assembly '%s' (MVID = %s) and an assembly with the same simple name embedded in the native image '%s' (MVID = %s)"), SString(SString::Utf8, assembly->GetSimpleName()).GetUnicode(), assemblyMvidText, SString(SString::Utf8, GetFileName()).GetUnicode(), componentMvidText); EEPOLICY_HANDLE_FATAL_ERROR_WITH_MESSAGE(COR_E_FAILFAST, message.GetUnicode()); } #endif #ifndef DACCESS_COMPILE IMDInternalImport *NativeImage::LoadManifestMetadata() { STANDARD_VM_CONTRACT; IMAGE_DATA_DIRECTORY *pMeta = m_pReadyToRunInfo->FindSection(ReadyToRunSectionType::ManifestMetadata); if (pMeta == NULL) { return NULL; } IMDInternalImport *pNewImport = NULL; IfFailThrow(GetMetaDataInternalInterface((BYTE *)m_pImageLayout->GetBase() + VAL32(pMeta->VirtualAddress), VAL32(pMeta->Size), ofRead, IID_IMDInternalImport, (void **) &pNewImport)); return pNewImport; } #endif
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // -------------------------------------------------------------------------------- // NativeImage.cpp // // -------------------------------------------------------------------------------- #include "common.h" #include "nativeimage.h" // -------------------------------------------------------------------------------- // Headers // -------------------------------------------------------------------------------- #include <shlwapi.h> BOOL AssemblyNameIndexHashTraits::Equals(LPCUTF8 a, LPCUTF8 b) { WRAPPER_NO_CONTRACT; return SString(SString::Utf8Literal, a).CompareCaseInsensitive(SString(SString::Utf8Literal, b)) == 0; } AssemblyNameIndexHashTraits::count_t AssemblyNameIndexHashTraits::Hash(LPCUTF8 s) { WRAPPER_NO_CONTRACT; return SString(SString::Utf8Literal, s).HashCaseInsensitive(); } BOOL NativeImageIndexTraits::Equals(LPCUTF8 a, LPCUTF8 b) { WRAPPER_NO_CONTRACT; return SString(SString::Utf8Literal, a).CompareCaseInsensitive(SString(SString::Utf8Literal, b)) == 0; } NativeImageIndexTraits::count_t NativeImageIndexTraits::Hash(LPCUTF8 a) { WRAPPER_NO_CONTRACT; return SString(SString::Utf8Literal, a).HashCaseInsensitive(); } NativeImage::NativeImage(AssemblyBinder *pAssemblyBinder, PEImageLayout *pImageLayout, LPCUTF8 imageFileName) : m_eagerFixupsLock(CrstNativeImageEagerFixups) { CONTRACTL { THROWS; CONSTRUCTOR_CHECK; STANDARD_VM_CHECK; INJECT_FAULT(COMPlusThrowOM();); } CONTRACTL_END; m_pAssemblyBinder = pAssemblyBinder; m_pImageLayout = pImageLayout; m_fileName = imageFileName; m_eagerFixupsHaveRun = false; m_readyToRunCodeDisabled = false; } void NativeImage::Initialize(READYTORUN_HEADER *pHeader, LoaderAllocator *pLoaderAllocator, AllocMemTracker *pamTracker) { LoaderHeap *pHeap = pLoaderAllocator->GetHighFrequencyHeap(); m_pReadyToRunInfo = new ReadyToRunInfo(/*pModule*/ NULL, pLoaderAllocator, m_pImageLayout, pHeader, /*compositeImage*/ NULL, pamTracker); m_pComponentAssemblies = m_pReadyToRunInfo->FindSection(ReadyToRunSectionType::ComponentAssemblies); m_pComponentAssemblyMvids = m_pReadyToRunInfo->FindSection(ReadyToRunSectionType::ManifestAssemblyMvids); m_componentAssemblyCount = m_pComponentAssemblies->Size / sizeof(READYTORUN_COMPONENT_ASSEMBLIES_ENTRY); // Check if the current module's image has native manifest metadata, otherwise the current->GetNativeAssemblyImport() asserts. m_pManifestMetadata = LoadManifestMetadata(); HENUMInternal assemblyEnum; HRESULT hr = m_pManifestMetadata->EnumAllInit(mdtAssemblyRef, &assemblyEnum); mdAssemblyRef assemblyRef; m_manifestAssemblyCount = 0; while (m_pManifestMetadata->EnumNext(&assemblyEnum, &assemblyRef)) { LPCSTR assemblyName; hr = m_pManifestMetadata->GetAssemblyRefProps(assemblyRef, NULL, NULL, &assemblyName, NULL, NULL, NULL, NULL); m_assemblySimpleNameToIndexMap.Add(AssemblyNameIndex(assemblyName, m_manifestAssemblyCount)); m_manifestAssemblyCount++; } // When a composite image contributes to a larger version bubble, its manifest assembly // count may exceed its component assembly count as it may contain references to // assemblies outside of the composite image that are part of its version bubble. 
_ASSERTE(m_manifestAssemblyCount >= m_componentAssemblyCount); S_SIZE_T dwAllocSize = S_SIZE_T(sizeof(PTR_Assembly)) * S_SIZE_T(m_manifestAssemblyCount); // Note: Memory allocated on loader heap is zero filled m_pNativeMetadataAssemblyRefMap = (PTR_Assembly*)pamTracker->Track(pLoaderAllocator->GetLowFrequencyHeap()->AllocMem(dwAllocSize)); } NativeImage::~NativeImage() { STANDARD_VM_CONTRACT; delete m_pReadyToRunInfo; delete m_pImageLayout; if (m_pManifestMetadata != NULL) { m_pManifestMetadata->Release(); } } #ifndef DACCESS_COMPILE NativeImage *NativeImage::Open( Module *componentModule, LPCUTF8 nativeImageFileName, AssemblyBinder *pAssemblyBinder, LoaderAllocator *pLoaderAllocator, /* out */ bool *isNewNativeImage) { STANDARD_VM_CONTRACT; NativeImage *pExistingImage = AppDomain::GetCurrentDomain()->GetNativeImage(nativeImageFileName); if (pExistingImage != nullptr) { *isNewNativeImage = false; if (pExistingImage->GetAssemblyBinder() == pAssemblyBinder) { pExistingImage->AddComponentAssemblyToCache(componentModule->GetAssembly()); return pExistingImage; } else { return nullptr; } } SString path = componentModule->GetPath(); SString::Iterator lastPathSeparatorIter = path.End(); size_t pathDirLength = 0; if (PEAssembly::FindLastPathSeparator(path, lastPathSeparatorIter)) { pathDirLength = (lastPathSeparatorIter - path.Begin()) + 1; } SString compositeImageFileName(SString::Utf8, nativeImageFileName); SString fullPath; fullPath.Set(path, path.Begin(), (COUNT_T)pathDirLength); fullPath += compositeImageFileName; LPWSTR searchPathsConfig; IfFailThrow(CLRConfig::GetConfigValue(CLRConfig::INTERNAL_NativeImageSearchPaths, &searchPathsConfig)); PEImageLayoutHolder peLoadedImage; BundleFileLocation bundleFileLocation = Bundle::ProbeAppBundle(fullPath, /*pathIsBundleRelative */ true); if (bundleFileLocation.IsValid()) { // No need to use cache for this PE image. // Composite r2r PE image is not a part of anyone's identity. // We only need it to obtain the native image, which will be cached at AppDomain level. PEImageHolder pImage = PEImage::OpenImage(fullPath, MDInternalImport_NoCache, bundleFileLocation); PEImageLayout* loaded = pImage->GetOrCreateLayout(PEImageLayout::LAYOUT_LOADED); // We will let pImage instance be freed after exiting this scope, but we will keep the layout, // thus the layout needs an AddRef, or it will be gone together with pImage. loaded->AddRef(); peLoadedImage = loaded; } if (peLoadedImage.IsNull()) { EX_TRY { peLoadedImage = PEImageLayout::LoadNative(fullPath); } EX_CATCH { SString searchPaths(searchPathsConfig); SString::CIterator start = searchPaths.Begin(); while (start != searchPaths.End()) { SString::CIterator end = start; if (!searchPaths.Find(end, PATH_SEPARATOR_CHAR_W)) { end = searchPaths.End(); } fullPath.Set(searchPaths, start, (COUNT_T)(end - start)); if (end != searchPaths.End()) { // Skip path separator character ++end; } start = end; if (fullPath.GetCount() == 0) { continue; } fullPath.Append(DIRECTORY_SEPARATOR_CHAR_W); fullPath += compositeImageFileName; EX_TRY { peLoadedImage = PEImageLayout::LoadNative(fullPath); break; } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions) } } EX_END_CATCH(SwallowAllExceptions) if (peLoadedImage.IsNull()) { // Failed to locate the native composite R2R image LOG((LF_LOADER, LL_ALWAYS, "LOADER: failed to load native image '%s' for component assembly '%S' using search paths: '%S'\n", nativeImageFileName, path.GetUnicode(), searchPathsConfig != nullptr ? 
searchPathsConfig : W("<use COMPlus_NativeImageSearchPaths to set>"))); RaiseFailFastException(nullptr, nullptr, 0); } } READYTORUN_HEADER *pHeader = (READYTORUN_HEADER *)peLoadedImage->GetExport("RTR_HEADER"); if (pHeader == NULL) { COMPlusThrowHR(COR_E_BADIMAGEFORMAT); } if (pHeader->Signature != READYTORUN_SIGNATURE) { COMPlusThrowHR(COR_E_BADIMAGEFORMAT); } if (pHeader->MajorVersion < MINIMUM_READYTORUN_MAJOR_VERSION || pHeader->MajorVersion > READYTORUN_MAJOR_VERSION) { COMPlusThrowHR(COR_E_BADIMAGEFORMAT); } NewHolder<NativeImage> image = new NativeImage(pAssemblyBinder, peLoadedImage.Extract(), nativeImageFileName); AllocMemTracker amTracker; image->Initialize(pHeader, pLoaderAllocator, &amTracker); pExistingImage = AppDomain::GetCurrentDomain()->SetNativeImage(nativeImageFileName, image); if (pExistingImage == nullptr) { // No pre-existing image, new image has been stored in the map *isNewNativeImage = true; amTracker.SuppressRelease(); image->AddComponentAssemblyToCache(componentModule->GetAssembly()); return image.Extract(); } // Return pre-existing image if it was loaded into the same ALC, null otherwise *isNewNativeImage = false; if (pExistingImage->GetAssemblyBinder() == pAssemblyBinder) { pExistingImage->AddComponentAssemblyToCache(componentModule->GetAssembly()); return pExistingImage; } else { return nullptr; } } #endif #ifndef DACCESS_COMPILE void NativeImage::AddComponentAssemblyToCache(Assembly *assembly) { STANDARD_VM_CONTRACT; const AssemblyNameIndex *assemblyNameIndex = m_assemblySimpleNameToIndexMap.LookupPtr(assembly->GetSimpleName()); if (assemblyNameIndex != nullptr) { VolatileStore(&m_pNativeMetadataAssemblyRefMap[assemblyNameIndex->Index], assembly); } } #endif #ifndef DACCESS_COMPILE Assembly *NativeImage::LoadManifestAssembly(uint32_t rowid, DomainAssembly *pParentAssembly) { STANDARD_VM_CONTRACT; AssemblySpec spec; spec.InitializeSpec(TokenFromRid(rowid, mdtAssemblyRef), m_pManifestMetadata, pParentAssembly); return spec.LoadAssembly(FILE_LOADED); } #endif #ifndef DACCESS_COMPILE PTR_READYTORUN_CORE_HEADER NativeImage::GetComponentAssemblyHeader(LPCUTF8 simpleName) { STANDARD_VM_CONTRACT; const AssemblyNameIndex *assemblyNameIndex = m_assemblySimpleNameToIndexMap.LookupPtr(simpleName); if (assemblyNameIndex != NULL) { const BYTE *pImageBase = (const BYTE *)m_pImageLayout->GetBase(); const READYTORUN_COMPONENT_ASSEMBLIES_ENTRY *componentAssembly = (const READYTORUN_COMPONENT_ASSEMBLIES_ENTRY *)&pImageBase[m_pComponentAssemblies->VirtualAddress] + assemblyNameIndex->Index; return (PTR_READYTORUN_CORE_HEADER)&pImageBase[componentAssembly->ReadyToRunCoreHeader.VirtualAddress]; } return NULL; } #endif #ifndef DACCESS_COMPILE void NativeImage::CheckAssemblyMvid(Assembly *assembly) const { STANDARD_VM_CONTRACT; if (m_pComponentAssemblyMvids == NULL) { return; } const AssemblyNameIndex *assemblyNameIndex = m_assemblySimpleNameToIndexMap.LookupPtr(assembly->GetSimpleName()); if (assemblyNameIndex == NULL) { return; } GUID assemblyMvid; assembly->GetMDImport()->GetScopeProps(NULL, &assemblyMvid); const byte *pImageBase = (const BYTE *)m_pImageLayout->GetBase(); const GUID *componentMvid = (const GUID *)&pImageBase[m_pComponentAssemblyMvids->VirtualAddress] + assemblyNameIndex->Index; if (IsEqualGUID(*componentMvid, assemblyMvid)) { return; } static const size_t MVID_TEXT_LENGTH = 39; WCHAR assemblyMvidText[MVID_TEXT_LENGTH]; StringFromGUID2(assemblyMvid, assemblyMvidText, MVID_TEXT_LENGTH); WCHAR componentMvidText[MVID_TEXT_LENGTH]; StringFromGUID2(*componentMvid, 
componentMvidText, MVID_TEXT_LENGTH); SString message; message.Printf(W("MVID mismatch between loaded assembly '%s' (MVID = %s) and an assembly with the same simple name embedded in the native image '%s' (MVID = %s)"), SString(SString::Utf8, assembly->GetSimpleName()).GetUnicode(), assemblyMvidText, SString(SString::Utf8, GetFileName()).GetUnicode(), componentMvidText); EEPOLICY_HANDLE_FATAL_ERROR_WITH_MESSAGE(COR_E_FAILFAST, message.GetUnicode()); } #endif #ifndef DACCESS_COMPILE IMDInternalImport *NativeImage::LoadManifestMetadata() { STANDARD_VM_CONTRACT; IMAGE_DATA_DIRECTORY *pMeta = m_pReadyToRunInfo->FindSection(ReadyToRunSectionType::ManifestMetadata); if (pMeta == NULL) { return NULL; } IMDInternalImport *pNewImport = NULL; IfFailThrow(GetMetaDataInternalInterface((BYTE *)m_pImageLayout->GetBase() + VAL32(pMeta->VirtualAddress), VAL32(pMeta->Size), ofRead, IID_IMDInternalImport, (void **) &pNewImport)); return pNewImport; } #endif
-1
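Aside on the row above: CheckAssemblyMvid in the nativeimage.cpp blob formats two GUIDs with StringFromGUID2 into 39-character buffers before fail-fasting on an MVID mismatch. A minimal standalone sketch of just that formatting step follows; the function name PrintMvid is invented for illustration.

#include <windows.h>
#include <objbase.h>
#include <cstdio>

// StringFromGUID2 writes "{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}", which is
// 38 characters, so the buffer must hold at least 39 including the terminator.
void PrintMvid(const GUID &mvid)
{
    wchar_t text[39];
    if (StringFromGUID2(mvid, text, 39) > 0)
        std::wprintf(L"MVID = %ls\n", text);
}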
dotnet/runtime
66,474
Update repo CMake configuration to target /W4 by default
Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
AaronRobinsonMSFT
2022-03-10T20:20:31Z
2022-03-11T04:59:21Z
ba5207b0f4054fe8963fc3e87c2bd32907516961
5ce2b9f860f7a84c3059650bb67817d59d8f4953
Update repo CMake configuration to target /W4 by default. Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
./src/libraries/System.Globalization.Extensions/tests/IdnMapping/Data/Unicode_9_0/ReadMe.txt
# Unicode IDNA Mapping and Test Data # Date: 2016-06-20, 20:40:00 GMT [KW] # © 2016 Unicode®, Inc. # Unicode and the Unicode Logo are registered trademarks of Unicode, Inc. in the U.S. and other countries. # For terms of use, see http://www.unicode.org/terms_of_use.html This directory contains the final data files for version 9.0.0 of UTS #46, Unicode IDNA Compatibility Processing. http://www.unicode.org/reports/tr46/
# Unicode IDNA Mapping and Test Data # Date: 2016-06-20, 20:40:00 GMT [KW] # © 2016 Unicode®, Inc. # Unicode and the Unicode Logo are registered trademarks of Unicode, Inc. in the U.S. and other countries. # For terms of use, see http://www.unicode.org/terms_of_use.html This directory contains the final data files for version 9.0.0 of UTS #46, Unicode IDNA Compatibility Processing. http://www.unicode.org/reports/tr46/
-1
dotnet/runtime
66,474
Update repo CMake configuration to target /W4 by default
Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
AaronRobinsonMSFT
2022-03-10T20:20:31Z
2022-03-11T04:59:21Z
ba5207b0f4054fe8963fc3e87c2bd32907516961
5ce2b9f860f7a84c3059650bb67817d59d8f4953
Update repo CMake configuration to target /W4 by default. Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
./src/coreclr/pal/src/safecrt/swprintf.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*** *swprintf.c - print formatted to string * *Purpose: * defines _swprintf(), _swprintf_c and _snwprintf() - print formatted data * to string * *******************************************************************************/ #include <string.h> #include <errno.h> #include <limits.h> #include "internal_securecrt.h" #include "mbusafecrt_internal.h" /*** *ifndef _COUNT_ *int _swprintf(string, format, ...) - print formatted data to string *else *ifndef _SWPRINTFS_ERROR_RETURN_FIX *int _snwprintf(string, cnt, format, ...) - print formatted data to string *else *int _swprintf_c(string, cnt, format, ...) - print formatted data to string *endif *endif * *Purpose: * Prints formatted data to the using the format string to * format data and getting as many arguments as called for * Sets up a FILE so file i/o operations can be used, make * string look like a huge buffer to it, but _flsbuf will * refuse to flush it if it fills up. Appends '\0' to make * it a true string. _output does the real work here * * Allocate the 'fake' _iob[] entry statically instead of on * the stack so that other routines can assume that _iob[] * entries are in are in DGROUP and, thus, are near. * * We alias swprintf to _swprintf * *ifdef _COUNT_ *ifndef _SWPRINTFS_ERROR_RETURN_FIX * The _snwprintf() flavor takes a count argument that is * the max number of wide characters that should be written to the * user's buffer. * We don't expose this function directly in the headers. *else * The _swprintf_c() flavor does the same thing as the _snwprintf * above, but, it also fixes a issue in the return value in the case * when there isn't enough space to write the null terminator * We don't fix this issue in _snwprintf because of backward * compatibility. In new code, however, _snwprintf is #defined to * _swprintf_c so users get the fix. * *endif * * Multi-thread: (1) Since there is no stream, this routine must * never try to get the stream lock (i.e., there is no stream * lock either). (2) Also, since there is only one statically * allocated 'fake' iob, we must lock/unlock to prevent collisions. * *Entry: * char16_t *string - pointer to place to put output *ifdef _COUNT_ * size_t count - max number of wide characters to put in buffer *endif * char16_t *format - format string to control data format/number * of arguments followed by list of arguments, number and type * controlled by format string * *Exit: * returns number of wide characters printed * *Exceptions: * *******************************************************************************/ DLLEXPORT int __cdecl swprintf_s ( char16_t *string, size_t sizeInWords, const char16_t *format, ... ) { int ret; va_list arglist; va_start(arglist, format); ret = vswprintf_s(string, sizeInWords, format, arglist); va_end(arglist); return ret; } DLLEXPORT int __cdecl _snwprintf_s ( char16_t *string, size_t sizeInWords, size_t count, const char16_t *format, ... ) { int ret; va_list arglist; va_start(arglist, format); ret = _vsnwprintf_s(string, sizeInWords, count, format, arglist); va_end(arglist); return ret; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*** *swprintf.c - print formatted to string * *Purpose: * defines _swprintf(), _swprintf_c and _snwprintf() - print formatted data * to string * *******************************************************************************/ #include <string.h> #include <errno.h> #include <limits.h> #include "internal_securecrt.h" #include "mbusafecrt_internal.h" /*** *ifndef _COUNT_ *int _swprintf(string, format, ...) - print formatted data to string *else *ifndef _SWPRINTFS_ERROR_RETURN_FIX *int _snwprintf(string, cnt, format, ...) - print formatted data to string *else *int _swprintf_c(string, cnt, format, ...) - print formatted data to string *endif *endif * *Purpose: * Prints formatted data to the using the format string to * format data and getting as many arguments as called for * Sets up a FILE so file i/o operations can be used, make * string look like a huge buffer to it, but _flsbuf will * refuse to flush it if it fills up. Appends '\0' to make * it a true string. _output does the real work here * * Allocate the 'fake' _iob[] entry statically instead of on * the stack so that other routines can assume that _iob[] * entries are in are in DGROUP and, thus, are near. * * We alias swprintf to _swprintf * *ifdef _COUNT_ *ifndef _SWPRINTFS_ERROR_RETURN_FIX * The _snwprintf() flavor takes a count argument that is * the max number of wide characters that should be written to the * user's buffer. * We don't expose this function directly in the headers. *else * The _swprintf_c() flavor does the same thing as the _snwprintf * above, but, it also fixes a issue in the return value in the case * when there isn't enough space to write the null terminator * We don't fix this issue in _snwprintf because of backward * compatibility. In new code, however, _snwprintf is #defined to * _swprintf_c so users get the fix. * *endif * * Multi-thread: (1) Since there is no stream, this routine must * never try to get the stream lock (i.e., there is no stream * lock either). (2) Also, since there is only one statically * allocated 'fake' iob, we must lock/unlock to prevent collisions. * *Entry: * char16_t *string - pointer to place to put output *ifdef _COUNT_ * size_t count - max number of wide characters to put in buffer *endif * char16_t *format - format string to control data format/number * of arguments followed by list of arguments, number and type * controlled by format string * *Exit: * returns number of wide characters printed * *Exceptions: * *******************************************************************************/ DLLEXPORT int __cdecl swprintf_s ( char16_t *string, size_t sizeInWords, const char16_t *format, ... ) { int ret; va_list arglist; va_start(arglist, format); ret = vswprintf_s(string, sizeInWords, format, arglist); va_end(arglist); return ret; } DLLEXPORT int __cdecl _snwprintf_s ( char16_t *string, size_t sizeInWords, size_t count, const char16_t *format, ... ) { int ret; va_list arglist; va_start(arglist, format); ret = _vsnwprintf_s(string, sizeInWords, count, format, arglist); va_end(arglist); return ret; }
-1
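Aside on the row above: both PAL wrappers (swprintf_s and _snwprintf_s) follow the same pattern, a variadic front-end that captures its trailing arguments with va_start and forwards them to the v-flavored formatter. A self-contained sketch of that pattern using the standard wchar_t CRT, so it compiles without the PAL headers; the name FormatInto is illustrative only.

#include <cstdarg>
#include <cwchar>

// Variadic front-end that forwards to vswprintf, mirroring how swprintf_s in
// the blob above forwards to vswprintf_s.
int FormatInto(wchar_t *dst, size_t count, const wchar_t *fmt, ...)
{
    va_list args;
    va_start(args, fmt);                          // capture the trailing arguments
    int written = std::vswprintf(dst, count, fmt, args);
    va_end(args);                                 // always pair va_start with va_end
    return written;                               // negative on error
}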
dotnet/runtime
66,474
Update repo CMake configuration to target /W4 by default
Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
AaronRobinsonMSFT
2022-03-10T20:20:31Z
2022-03-11T04:59:21Z
ba5207b0f4054fe8963fc3e87c2bd32907516961
5ce2b9f860f7a84c3059650bb67817d59d8f4953
Update repo CMake configuration to target /W4 by default. Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
./src/coreclr/pal/tests/palsuite/c_runtime/_vsnprintf_s/test9/test9.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: test9.c ** ** Purpose: Test #9 for the _vsnprintf function. ** ** **===================================================================*/ #include <palsuite.h> #include "../_vsnprintf_s.h" /* * Notes: memcmp is used, as is strlen. */ PALTEST(c_runtime__vsnprintf_s_test9_paltest_vsnprintf_test9, "c_runtime/_vsnprintf_s/test9/paltest_vsnprintf_test9") { int neg = -42; int pos = 42; INT64 l = 42; if (PAL_Initialize(argc, argv) != 0) { return(FAIL); } DoNumTest("foo %i", pos, "foo 42"); DoNumTest("foo %li", 0xFFFF, "foo 65535"); DoNumTest("foo %hi", 0xFFFF, "foo -1"); DoNumTest("foo %Li", pos, "foo 42"); DoI64Test("foo %I64i", l, "42", "foo 42"); DoNumTest("foo %3i", pos, "foo 42"); DoNumTest("foo %-3i", pos, "foo 42 "); DoNumTest("foo %.1i", pos, "foo 42"); DoNumTest("foo %.3i", pos, "foo 042"); DoNumTest("foo %03i", pos, "foo 042"); DoNumTest("foo %#i", pos, "foo 42"); DoNumTest("foo %+i", pos, "foo +42"); DoNumTest("foo % i", pos, "foo 42"); DoNumTest("foo %+i", neg, "foo -42"); DoNumTest("foo % i", neg, "foo -42"); PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: test9.c ** ** Purpose: Test #9 for the _vsnprintf function. ** ** **===================================================================*/ #include <palsuite.h> #include "../_vsnprintf_s.h" /* * Notes: memcmp is used, as is strlen. */ PALTEST(c_runtime__vsnprintf_s_test9_paltest_vsnprintf_test9, "c_runtime/_vsnprintf_s/test9/paltest_vsnprintf_test9") { int neg = -42; int pos = 42; INT64 l = 42; if (PAL_Initialize(argc, argv) != 0) { return(FAIL); } DoNumTest("foo %i", pos, "foo 42"); DoNumTest("foo %li", 0xFFFF, "foo 65535"); DoNumTest("foo %hi", 0xFFFF, "foo -1"); DoNumTest("foo %Li", pos, "foo 42"); DoI64Test("foo %I64i", l, "42", "foo 42"); DoNumTest("foo %3i", pos, "foo 42"); DoNumTest("foo %-3i", pos, "foo 42 "); DoNumTest("foo %.1i", pos, "foo 42"); DoNumTest("foo %.3i", pos, "foo 042"); DoNumTest("foo %03i", pos, "foo 042"); DoNumTest("foo %#i", pos, "foo 42"); DoNumTest("foo %+i", pos, "foo +42"); DoNumTest("foo % i", pos, "foo 42"); DoNumTest("foo %+i", neg, "foo -42"); DoNumTest("foo % i", neg, "foo -42"); PAL_Terminate(); return PASS; }
-1
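Aside on the row above: the test pins down how the '+', space, and zero-padding flags interact with %i. The same expectations can be reproduced with the standard CRT; a tiny standalone check, with the expected output noted in the comments.

#include <cstdio>

int main()
{
    std::printf("[%+i]\n", 42);   // prints [+42] : '+' forces an explicit sign
    std::printf("[% i]\n", 42);   // prints [ 42] : space reserves room for a sign
    std::printf("[%+i]\n", -42);  // prints [-42]
    std::printf("[%03i]\n", 42);  // prints [042] : zero-pad to width 3
    std::printf("[%.3i]\n", 42);  // prints [042] : precision also pads with zeros
    return 0;
}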
dotnet/runtime
66,474
Update repo CMake configuration to target /W4 by default
Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
AaronRobinsonMSFT
2022-03-10T20:20:31Z
2022-03-11T04:59:21Z
ba5207b0f4054fe8963fc3e87c2bd32907516961
5ce2b9f860f7a84c3059650bb67817d59d8f4953
Update repo CMake configuration to target /W4 by default. Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
./src/coreclr/tools/metainfo/mdinfo.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include <stdio.h> #include <windows.h> #include <objbase.h> #include <crtdbg.h> #include <assert.h> #include <corpriv.h> #include <cor.h> #include "assert.h" #include "corerror.h" #include <winwrap.h> #include <prettyprintsig.h> #include <cahlpr.h> #include <limits.h> #include "mdinfo.h" #define ENUM_BUFFER_SIZE 10 #define TAB_SIZE 8 #define ISFLAG(p,x) if (Is##p##x(flags)) strcat_s(sFlags,STRING_BUFFER_LEN, "["#x "] "); extern HRESULT _FillVariant( BYTE bCPlusTypeFlag, void const *pValue, ULONG cbValue, VARIANT *pvar); // Validator declarations. extern DWORD g_ValModuleType; // Tables for mapping element type to text const char *g_szMapElementType[] = { "End", // 0x0 "Void", // 0x1 "Boolean", "Char", "I1", "UI1", "I2", // 0x6 "UI2", "I4", "UI4", "I8", "UI8", "R4", "R8", "String", "Ptr", // 0xf "ByRef", // 0x10 "ValueClass", "Class", "Var", "MDArray", // 0x14 "GenericInst", "TypedByRef", "VALUEARRAY", "I", "U", "R", // 0x1a "FNPTR", "Object", "SZArray", "MVar", "CMOD_REQD", "CMOD_OPT", "INTERNAL", }; const char *g_szMapUndecorateType[] = { "", // 0x0 "void", "boolean", "Char", "byte", "unsigned byte", "short", "unsigned short", "int", "unsigned int", "long", "unsigned long", "float", "double", "String", "*", // 0xf "ByRef", "", "", "", "", "", "", "", "", "", "", "Function Pointer", "Object", "", "", "CMOD_REQD", "CMOD_OPT", "INTERNAL", }; // Provide enough entries for IMAGE_CEE_CS_CALLCONV_MASK (defined in CorHdr.h) const char *g_strCalling[] = { "[DEFAULT]", "[C]", "[STDCALL]", "[THISCALL]", "[FASTCALL]", "[VARARG]", "[FIELD]", "[LOCALSIG]", "[PROPERTY]", "[UNMANAGED]", "[GENERICINST]", "[NATIVEVARARG]", "[INVALID]", "[INVALID]", "[INVALID]", "[INVALID]" }; const char *g_szNativeType[] = { "NATIVE_TYPE_END(DEPRECATED!)", // = 0x0, //DEPRECATED "NATIVE_TYPE_VOID(DEPRECATED!)", // = 0x1, //DEPRECATED "NATIVE_TYPE_BOOLEAN", // = 0x2, // (4 byte boolean value: TRUE = non-zero, FALSE = 0) "NATIVE_TYPE_I1", // = 0x3, "NATIVE_TYPE_U1", // = 0x4, "NATIVE_TYPE_I2", // = 0x5, "NATIVE_TYPE_U2", // = 0x6, "NATIVE_TYPE_I4", // = 0x7, "NATIVE_TYPE_U4", // = 0x8, "NATIVE_TYPE_I8", // = 0x9, "NATIVE_TYPE_U8", // = 0xa, "NATIVE_TYPE_R4", // = 0xb, "NATIVE_TYPE_R8", // = 0xc, "NATIVE_TYPE_SYSCHAR(DEPRECATED!)", // = 0xd, //DEPRECATED "NATIVE_TYPE_VARIANT(DEPRECATED!)", // = 0xe, //DEPRECATED "NATIVE_TYPE_CURRENCY", // = 0xf, "NATIVE_TYPE_PTR(DEPRECATED!)", // = 0x10, //DEPRECATED "NATIVE_TYPE_DECIMAL(DEPRECATED!)", // = 0x11, //DEPRECATED "NATIVE_TYPE_DATE(DEPRECATED!)", // = 0x12, //DEPRECATED "NATIVE_TYPE_BSTR", // = 0x13, "NATIVE_TYPE_LPSTR", // = 0x14, "NATIVE_TYPE_LPWSTR", // = 0x15, "NATIVE_TYPE_LPTSTR", // = 0x16, "NATIVE_TYPE_FIXEDSYSSTRING", // = 0x17, "NATIVE_TYPE_OBJECTREF(DEPRECATED!)", // = 0x18, //DEPRECATED "NATIVE_TYPE_IUNKNOWN", // = 0x19, "NATIVE_TYPE_IDISPATCH", // = 0x1a, "NATIVE_TYPE_STRUCT", // = 0x1b, "NATIVE_TYPE_INTF", // = 0x1c, "NATIVE_TYPE_SAFEARRAY", // = 0x1d, "NATIVE_TYPE_FIXEDARRAY", // = 0x1e, "NATIVE_TYPE_INT", // = 0x1f, "NATIVE_TYPE_UINT", // = 0x20, "NATIVE_TYPE_NESTEDSTRUCT(DEPRECATED!)", // = 0x21, //DEPRECATED (use "NATIVE_TYPE_STRUCT) "NATIVE_TYPE_BYVALSTR", // = 0x22, "NATIVE_TYPE_ANSIBSTR", // = 0x23, "NATIVE_TYPE_TBSTR", // = 0x24, // select BSTR or ANSIBSTR depending on platform "NATIVE_TYPE_VARIANTBOOL", // = 0x25, // (2-byte boolean value: TRUE = -1, FALSE = 0) "NATIVE_TYPE_FUNC", // = 0x26, "NATIVE_TYPE_LPVOID", // = 0x27, 
// blind pointer (no deep marshaling) "NATIVE_TYPE_ASANY", // = 0x28, "<UNDEFINED NATIVE TYPE 0x29>", "NATIVE_TYPE_ARRAY", // = 0x2a, "NATIVE_TYPE_LPSTRUCT", // = 0x2b, "NATIVE_TYPE_CUSTOMMARSHALER", // = 0x2c, // Custom marshaler. "NATIVE_TYPE_ERROR", // = 0x2d, // VT_HRESULT when exporting to a typelib. }; size_t g_cbCoffNames = 0; mdMethodDef g_tkEntryPoint = 0; // integration with ILDASM // helper to init signature buffer void MDInfo::InitSigBuffer() { strcpy_s((LPSTR)m_sigBuf.Ptr(), 1, ""); } // void MDInfo::InitSigBuffer() // helper to append a string into the signature buffer. If size of signature buffer is not big enough, // we will grow it. HRESULT MDInfo::AddToSigBuffer(_In_z_ const char *string) { HRESULT hr; size_t LL = strlen((LPSTR)m_sigBuf.Ptr()) + strlen(string) + 1; IfFailRet( m_sigBuf.ReSizeNoThrow(LL) ); strcat_s((LPSTR)m_sigBuf.Ptr(), LL, string); return NOERROR; } // HRESULT MDInfo::AddToSigBuffer() MDInfo::MDInfo(IMetaDataImport2 *pImport, IMetaDataAssemblyImport *pAssemblyImport, LPCWSTR szScope, strPassBackFn inPBFn, ULONG DumpFilter) { // This constructor is specific to ILDASM/MetaInfo integration _ASSERTE(pImport != NULL); _ASSERTE(ARRAY_SIZE(g_szMapElementType) == ARRAY_SIZE(g_szMapUndecorateType)); _ASSERTE(ARRAY_SIZE(g_szMapElementType) == ELEMENT_TYPE_MAX); Init(inPBFn, (DUMP_FILTER)DumpFilter); m_pImport = pImport; m_pImport->AddRef(); if ((m_pAssemblyImport = pAssemblyImport)) m_pAssemblyImport->AddRef(); else { HRESULT hr = m_pImport->QueryInterface(IID_IMetaDataAssemblyImport, (void**) &m_pAssemblyImport); if (FAILED(hr)) Error("QueryInterface failed for IID_IMetaDataAssemblyImport.", hr); } } // MDInfo::MDInfo() MDInfo::MDInfo(IMetaDataDispenserEx *pDispenser, LPCWSTR szScope, strPassBackFn inPBFn, ULONG DumpFilter) { HRESULT hr = S_OK; VARIANT value; _ASSERTE(pDispenser != NULL && inPBFn != NULL); _ASSERTE(ARRAY_SIZE(g_szMapElementType) == ARRAY_SIZE(g_szMapUndecorateType)); _ASSERTE(ARRAY_SIZE(g_szMapElementType) == ELEMENT_TYPE_MAX); Init(inPBFn, (DUMP_FILTER)DumpFilter); // Attempt to open scope on given file V_VT(&value) = VT_UI4; V_UI4(&value) = MDImportOptionAll; if (FAILED(hr = pDispenser->SetOption(MetaDataImportOption, &value))) Error("SetOption failed.", hr); hr = pDispenser->OpenScope(szScope, ofNoTransform, IID_IMetaDataImport2, (IUnknown**)&m_pImport); if (hr == CLDB_E_BADUPDATEMODE) { V_VT(&value) = VT_UI4; V_UI4(&value) = MDUpdateIncremental; if (FAILED(hr = pDispenser->SetOption(MetaDataSetUpdate, &value))) Error("SetOption failed.", hr); hr = pDispenser->OpenScope(szScope, ofNoTransform, IID_IMetaDataImport2, (IUnknown**)&m_pImport); } if (FAILED(hr)) Error("OpenScope failed", hr); // Query for the IMetaDataAssemblyImport interface. hr = m_pImport->QueryInterface(IID_IMetaDataAssemblyImport, (void**) &m_pAssemblyImport); if (FAILED(hr)) Error("QueryInterface failed for IID_IMetaDataAssemblyImport.", hr); } // MDInfo::MDInfo() MDInfo::MDInfo(IMetaDataDispenserEx *pDispenser, PBYTE pbMetaData, DWORD dwSize, strPassBackFn inPBFn, ULONG DumpFilter) { _ASSERTE(pDispenser != NULL && inPBFn != NULL); _ASSERTE(ARRAY_SIZE(g_szMapElementType) == ARRAY_SIZE(g_szMapUndecorateType)); _ASSERTE(ARRAY_SIZE(g_szMapElementType) == ELEMENT_TYPE_MAX); Init(inPBFn, (DUMP_FILTER)DumpFilter); // Attempt to open scope on manifest. It's valid for this to fail, because // the blob we open may just be the assembly resources (the space is // overloaded until we remove LM -a assemblies, at which point this // constructor should probably be removed too). 
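    // Note: like the other constructors, this one asks for MDImportOptionAll before
    // opening the scope, so the dump is not limited to the importer's default record set
    // (reading of the option name; the exact record filtering is the importer's business).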
HRESULT hr; VARIANT value; V_VT(&value) = VT_UI4; V_UI4(&value) = MDImportOptionAll; if (FAILED(hr = pDispenser->SetOption(MetaDataImportOption, &value))) Error("SetOption failed.", hr); if (SUCCEEDED(hr = pDispenser->OpenScopeOnMemory(pbMetaData, dwSize, ofNoTransform, IID_IMetaDataImport2, (IUnknown**)&m_pImport))) { // Query for the IMetaDataAssemblyImport interface. hr = m_pImport->QueryInterface(IID_IMetaDataAssemblyImport, (void**) &m_pAssemblyImport); if (FAILED(hr)) Error("QueryInterface failed for IID_IMetaDataAssemblyImport.", hr); } } // MDInfo::MDInfo() void MDInfo::Init( strPassBackFn inPBFn, // Callback to write text. DUMP_FILTER DumpFilter) // Flags to control the dump. { m_pbFn = inPBFn; m_DumpFilter = DumpFilter; m_pTables = NULL; m_pTables2 = NULL; m_pImport = NULL; m_pAssemblyImport = NULL; } // void MDInfo::Init() // Destructor MDInfo::~MDInfo() { if (m_pImport) m_pImport->Release(); if (m_pAssemblyImport) m_pAssemblyImport->Release(); if (m_pTables) m_pTables->Release(); if (m_pTables2) m_pTables2->Release(); } // MDInfo::~MDInfo() //===================================================================================================================== // DisplayMD() function // // Displays the meta data content of a file void MDInfo::DisplayMD() { if ((m_DumpFilter & dumpAssem) && m_pAssemblyImport) DisplayAssemblyInfo(); WriteLine("==========================================================="); // Metadata itself: Raw or normal view if (m_DumpFilter & (dumpSchema | dumpHeader | dumpCSV | dumpRaw | dumpStats | dumpRawHeaps)) DisplayRaw(); else { DisplayVersionInfo(); DisplayScopeInfo(); WriteLine("==========================================================="); DisplayGlobalFunctions(); DisplayGlobalFields(); DisplayGlobalMemberRefs(); DisplayTypeDefs(); DisplayTypeRefs(); DisplayTypeSpecs(); DisplayMethodSpecs(); DisplayModuleRefs(); DisplaySignatures(); DisplayAssembly(); DisplayUserStrings(); // WriteLine("============================================================"); // WriteLine("Unresolved MemberRefs"); // DisplayMemberRefs(0x00000001, "\t"); VWrite("\n\nCoff symbol name overhead: %d\n", g_cbCoffNames); } WriteLine("==========================================================="); if (m_DumpFilter & dumpUnsat) DisplayUnsatInfo(); WriteLine("==========================================================="); } // MDVEHandlerClass() int MDInfo::WriteLine(_In_z_ const char *str) { ULONG32 count = (ULONG32) strlen(str); m_pbFn(str); m_pbFn("\n"); return count; } // int MDInfo::WriteLine() int MDInfo::Write(_In_z_ const char *str) { ULONG32 count = (ULONG32) strlen(str); m_pbFn(str); return count; } // int MDInfo::Write() int MDInfo::VWriteLine(_In_z_ const char *str, ...) { va_list marker; int count; va_start(marker, str); count = VWriteMarker(str, marker); m_pbFn("\n"); va_end(marker); return count; } // int MDInfo::VWriteLine() int MDInfo::VWrite(_In_z_ const char *str, ...) { va_list marker; int count; va_start(marker, str); count = VWriteMarker(str, marker); va_end(marker); return count; } // int MDInfo::VWrite() int MDInfo::VWriteMarker(_In_z_ const char *str, va_list marker) { HRESULT hr; int count = -1; // Used to allocate 1K, then if not enough, 2K, then 4K. 
    // Faster to allocate 32K right away and be done with it,
    // we're not running on Commodore 64
    if (FAILED(hr = m_output.ReSizeNoThrow(STRING_BUFFER_LEN * 8)))
        Error("ReSize failed.", hr);
    else
    {
        count = vsprintf_s((char *)m_output.Ptr(), STRING_BUFFER_LEN * 8, str, marker);
        m_pbFn((char *)m_output.Ptr());
    }
    return count;
} // int MDInfo::VWriteMarker()

// Error() function -- prints an error and exits.
void MDInfo::Error(const char* szError, HRESULT hr)
{
    printf("\n%s\n",szError);
    if (hr != S_OK)
    {
        printf("Failed return code: 0x%08x\n", hr);

        IErrorInfo *pIErr = NULL;   // Error interface.
        BSTR bstrDesc = NULL;       // Description text.
#ifdef FEATURE_COMINTEROP
        // Try to get an error info object and display the message.
        if (GetErrorInfo(0, &pIErr) == S_OK &&
            pIErr->GetDescription(&bstrDesc) == S_OK)
        {
            printf("%ls ", bstrDesc);
            SysFreeString(bstrDesc);
        }
#endif
        // Free the error interface.
        if (pIErr)
            pIErr->Release();
    }
    exit(hr);
} // void MDInfo::Error()

// Print out the optional version info included in the MetaData.
void MDInfo::DisplayVersionInfo()
{
    if (!(m_DumpFilter & MDInfo::dumpNoLogo))
    {
        LPCUTF8 pVersionStr;
        HRESULT hr = S_OK;

        if (m_pTables == 0)
        {
            if (m_pImport)
                hr = m_pImport->QueryInterface(IID_IMetaDataTables, (void**)&m_pTables);
            else if (m_pAssemblyImport)
                hr = m_pAssemblyImport->QueryInterface(IID_IMetaDataTables, (void**)&m_pTables);
            else
                return;
            if (FAILED(hr))
                Error("QueryInterface failed for IID_IMetaDataTables.", hr);
        }

        hr = m_pTables->GetString(1, &pVersionStr);
        if (FAILED(hr))
            Error("GetString() failed.", hr);
        if (strstr(pVersionStr, "Version of runtime against which the binary is built : ") == pVersionStr)
        {
            WriteLine(const_cast<char *>(pVersionStr));
        }
    }
} // void MDInfo::DisplayVersionInfo()

// Prints out information about the scope
void MDInfo::DisplayScopeInfo()
{
    HRESULT hr;
    mdModule mdm;
    GUID mvid;
    WCHAR scopeName[STRING_BUFFER_LEN];
    WCHAR guidString[STRING_BUFFER_LEN];

    hr = m_pImport->GetScopeProps( scopeName, STRING_BUFFER_LEN, 0, &mvid);
    if (FAILED(hr)) Error("GetScopeProps failed.", hr);

    VWriteLine("ScopeName : %ls",scopeName);

    if (!(m_DumpFilter & MDInfo::dumpNoLogo))
        VWriteLine("MVID : %ls",GUIDAsString(mvid, guidString, STRING_BUFFER_LEN));

    hr = m_pImport->GetModuleFromScope(&mdm);
    if (FAILED(hr)) Error("GetModuleFromScope failed.", hr);
    DisplayPermissions(mdm, "");
    DisplayCustomAttributes(mdm, "\t");
} // void MDInfo::DisplayScopeInfo()

void MDInfo::DisplayRaw()
{
    int iDump;  // Level of info to dump.
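    // iDump is chosen below from the dump filter: 3 = full raw rows (dumpRaw),
    // 2 = schema (dumpSchema), 1 = header only.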
if (m_pTables == 0) m_pImport->QueryInterface(IID_IMetaDataTables, (void**)&m_pTables); if (m_pTables == 0) Error("Can't get table info."); if (m_pTables2 == 0) m_pImport->QueryInterface(IID_IMetaDataTables2, (void**)&m_pTables2); if (m_DumpFilter & dumpCSV) DumpRawCSV(); if (m_DumpFilter & (dumpSchema | dumpHeader | dumpRaw | dumpStats)) { if (m_DumpFilter & dumpRaw) iDump = 3; else if (m_DumpFilter & dumpSchema) iDump = 2; else iDump = 1; DumpRaw(iDump, (m_DumpFilter & dumpStats) != 0); } if (m_DumpFilter & dumpRawHeaps) DumpRawHeaps(); } // void MDInfo::DisplayRaw() // return the name of the type of token passed in const char *MDInfo::TokenTypeName(mdToken inToken) { switch(TypeFromToken(inToken)) { case mdtTypeDef: return "TypeDef"; case mdtInterfaceImpl: return "InterfaceImpl"; case mdtMethodDef: return "MethodDef"; case mdtFieldDef: return "FieldDef"; case mdtTypeRef: return "TypeRef"; case mdtMemberRef: return "MemberRef"; case mdtCustomAttribute:return "CustomAttribute"; case mdtParamDef: return "ParamDef"; case mdtProperty: return "Property"; case mdtEvent: return "Event"; case mdtTypeSpec: return "TypeSpec"; default: return "[UnknownTokenType]"; } } // char *MDInfo::TokenTypeName() // Prints out name of the given memberref // LPCWSTR MDInfo::MemberRefName(mdMemberRef inMemRef, _Out_writes_(bufLen) LPWSTR buffer, ULONG bufLen) { HRESULT hr; hr = m_pImport->GetMemberRefProps( inMemRef, NULL, buffer, bufLen, NULL, NULL, NULL); if (FAILED(hr)) Error("GetMemberRefProps failed.", hr); return buffer; } // LPCWSTR MDInfo::MemberRefName() // Prints out information about the given memberref // void MDInfo::DisplayMemberRefInfo(mdMemberRef inMemRef, const char *preFix) { HRESULT hr; WCHAR memRefName[STRING_BUFFER_LEN]; ULONG nameLen; mdToken token; PCCOR_SIGNATURE pbSigBlob; ULONG ulSigBlob; char newPreFix[STRING_BUFFER_LEN]; hr = m_pImport->GetMemberRefProps( inMemRef, &token, memRefName, STRING_BUFFER_LEN, &nameLen, &pbSigBlob, &ulSigBlob); if (FAILED(hr)) Error("GetMemberRefProps failed.", hr); VWriteLine("%s\t\tMember: (%8.8x) %ls: ", preFix, inMemRef, memRefName); if (ulSigBlob) DisplaySignature(pbSigBlob, ulSigBlob, preFix); else VWriteLine("%s\t\tERROR: no valid signature ", preFix); sprintf_s (newPreFix, STRING_BUFFER_LEN, "\t\t%s", preFix); DisplayCustomAttributes(inMemRef, newPreFix); } // void MDInfo::DisplayMemberRefInfo() // Prints out information about all memberrefs of the given typeref // void MDInfo::DisplayMemberRefs(mdToken tkParent, const char *preFix) { HCORENUM memRefEnum = NULL; HRESULT hr; mdMemberRef memRefs[ENUM_BUFFER_SIZE]; ULONG count, totalCount = 1; while (SUCCEEDED(hr = m_pImport->EnumMemberRefs( &memRefEnum, tkParent, memRefs, ARRAY_SIZE(memRefs), &count)) && count > 0) { for (ULONG i = 0; i < count; i++, totalCount++) { VWriteLine("%s\tMemberRef #%d (%08x)", preFix, totalCount, memRefs[i]); VWriteLine("%s\t-------------------------------------------------------", preFix); DisplayMemberRefInfo(memRefs[i], preFix); } } m_pImport->CloseEnum( memRefEnum); } // void MDInfo::DisplayMemberRefs() // Prints out information about all resources in the com object // // Iterates through each typeref and prints out the information of each // void MDInfo::DisplayTypeRefs() { HCORENUM typeRefEnum = NULL; mdTypeRef typeRefs[ENUM_BUFFER_SIZE]; ULONG count, totalCount=1; HRESULT hr; while (SUCCEEDED(hr = m_pImport->EnumTypeRefs( &typeRefEnum, typeRefs, ARRAY_SIZE(typeRefs), &count)) && count > 0) { for (ULONG i = 0; i < count; i++, totalCount++) { VWriteLine("TypeRef #%d 
(%08x)", totalCount, typeRefs[i]); WriteLine("-------------------------------------------------------"); DisplayTypeRefInfo(typeRefs[i]); DisplayMemberRefs(typeRefs[i], ""); WriteLine(""); } } m_pImport->CloseEnum( typeRefEnum); } // void MDInfo::DisplayTypeRefs() void MDInfo::DisplayTypeSpecs() { HCORENUM typespecEnum = NULL; mdTypeSpec typespecs[ENUM_BUFFER_SIZE]; ULONG count, totalCount=1; HRESULT hr; while (SUCCEEDED(hr = m_pImport->EnumTypeSpecs( &typespecEnum, typespecs, ARRAY_SIZE(typespecs), &count)) && count > 0) { for (ULONG i = 0; i < count; i++, totalCount++) { VWriteLine("TypeSpec #%d (%08x)", totalCount, typespecs[i]); WriteLine("-------------------------------------------------------"); DisplayTypeSpecInfo(typespecs[i], ""); DisplayMemberRefs(typespecs[i], ""); WriteLine(""); } } m_pImport->CloseEnum( typespecEnum); } // void MDInfo::DisplayTypeSpecs() void MDInfo::DisplayMethodSpecs() { HCORENUM MethodSpecEnum = NULL; mdMethodSpec MethodSpecs[ENUM_BUFFER_SIZE]; ULONG count, totalCount=1; ///// HRESULT hr; ///// HACK until I implement EnumMethodSpecs! ///// while (SUCCEEDED(hr = m_pImport->EnumMethodSpecs( &MethodSpecEnum, ///// MethodSpecs, ARRAY_SIZE(MethodSpecs), &count)) && ///// count > 0) for (ULONG rid=1; m_pImport->IsValidToken(TokenFromRid(rid, mdtMethodSpec)); ++rid) { // More hackery count = 1; MethodSpecs[0] = TokenFromRid(rid, mdtMethodSpec); // More hackery for (ULONG i = 0; i < count; i++, totalCount++) { VWriteLine("MethodSpec #%d (%08x)", totalCount, MethodSpecs[i]); DisplayMethodSpecInfo(MethodSpecs[i], ""); WriteLine(""); } } m_pImport->CloseEnum( MethodSpecEnum); } // void MDInfo::DisplayMethodSpecs() // Called to display the information about all typedefs in the object. // void MDInfo::DisplayTypeDefs() { HCORENUM typeDefEnum = NULL; mdTypeDef typeDefs[ENUM_BUFFER_SIZE]; ULONG count, totalCount = 1; HRESULT hr; while (SUCCEEDED(hr = m_pImport->EnumTypeDefs( &typeDefEnum, typeDefs, ARRAY_SIZE(typeDefs), &count)) && count > 0) { for (ULONG i = 0; i < count; i++, totalCount++) { VWriteLine("TypeDef #%d (%08x)", totalCount, typeDefs[i]); WriteLine("-------------------------------------------------------"); DisplayTypeDefInfo(typeDefs[i]); WriteLine(""); } } m_pImport->CloseEnum( typeDefEnum); } // void MDInfo::DisplayTypeDefs() // Called to display the information about all modulerefs in the object. 
// void MDInfo::DisplayModuleRefs() { HCORENUM moduleRefEnum = NULL; mdModuleRef moduleRefs[ENUM_BUFFER_SIZE]; ULONG count, totalCount = 1; HRESULT hr; while (SUCCEEDED(hr = m_pImport->EnumModuleRefs( &moduleRefEnum, moduleRefs, ARRAY_SIZE(moduleRefs), &count)) && count > 0) { for (ULONG i = 0; i < count; i++, totalCount++) { VWriteLine("ModuleRef #%d (%08x)", totalCount, moduleRefs[i]); WriteLine("-------------------------------------------------------"); DisplayModuleRefInfo(moduleRefs[i]); DisplayMemberRefs(moduleRefs[i], ""); WriteLine(""); } } m_pImport->CloseEnum( moduleRefEnum); } // void MDInfo::DisplayModuleRefs() // Prints out information about the given moduleref // void MDInfo::DisplayModuleRefInfo(mdModuleRef inModuleRef) { HRESULT hr; WCHAR moduleRefName[STRING_BUFFER_LEN]; ULONG nameLen; hr = m_pImport->GetModuleRefProps( inModuleRef, moduleRefName, STRING_BUFFER_LEN, &nameLen); if (FAILED(hr)) Error("GetModuleRefProps failed.", hr); VWriteLine("\t\tModuleRef: (%8.8x) %ls: ", inModuleRef, moduleRefName); DisplayCustomAttributes(inModuleRef, "\t\t"); } // void MDInfo::DisplayModuleRefInfo() // Called to display the information about all signatures in the object. // void MDInfo::DisplaySignatures() { HCORENUM signatureEnum = NULL; mdSignature signatures[ENUM_BUFFER_SIZE]; ULONG count, totalCount = 1; HRESULT hr; while (SUCCEEDED(hr = m_pImport->EnumSignatures( &signatureEnum, signatures, ARRAY_SIZE(signatures), &count)) && count > 0) { for (ULONG i = 0; i < count; i++, totalCount++) { VWriteLine("Signature #%d (%#08x)", totalCount, signatures[i]); WriteLine("-------------------------------------------------------"); DisplaySignatureInfo(signatures[i]); WriteLine(""); } } m_pImport->CloseEnum( signatureEnum); } // void MDInfo::DisplaySignatures() // Prints out information about the given signature // void MDInfo::DisplaySignatureInfo(mdSignature inSignature) { HRESULT hr; PCCOR_SIGNATURE pbSigBlob; ULONG ulSigBlob; hr = m_pImport->GetSigFromToken( inSignature, &pbSigBlob, &ulSigBlob ); if (FAILED(hr)) Error("GetSigFromToken failed.", hr); if(ulSigBlob) DisplaySignature(pbSigBlob, ulSigBlob, ""); else VWriteLine("\t\tERROR: no valid signature "); } // void MDInfo::DisplaySignatureInfo() // returns the passed-in buffer which is filled with the name of the given // member in wide characters // LPCWSTR MDInfo::MemberName(mdToken inToken, _Out_writes_(bufLen) LPWSTR buffer, ULONG bufLen) { HRESULT hr; hr = m_pImport->GetMemberProps( inToken, NULL, buffer, bufLen, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); if (FAILED(hr)) Error("GetMemberProps failed.", hr); return (buffer); } // LPCWSTR MDInfo::MemberName() // displays information for the given method // void MDInfo::DisplayMethodInfo(mdMethodDef inMethod, DWORD *pflags) { HRESULT hr; mdTypeDef memTypeDef; WCHAR memberName[STRING_BUFFER_LEN]; ULONG nameLen; DWORD flags; PCCOR_SIGNATURE pbSigBlob; ULONG ulSigBlob; ULONG ulCodeRVA; ULONG ulImplFlags; hr = m_pImport->GetMethodProps( inMethod, &memTypeDef, memberName, STRING_BUFFER_LEN, &nameLen, &flags, &pbSigBlob, &ulSigBlob, &ulCodeRVA, &ulImplFlags); if (FAILED(hr)) Error("GetMethodProps failed.", hr); if (pflags) *pflags = flags; VWriteLine("\t\tMethodName: %ls (%8.8X)", memberName, inMethod); char sFlags[STRING_BUFFER_LEN]; sFlags[0] = 0; ISFLAG(Md, Public); ISFLAG(Md, Private); ISFLAG(Md, Family); ISFLAG(Md, Assem); ISFLAG(Md, FamANDAssem); ISFLAG(Md, FamORAssem); ISFLAG(Md, PrivateScope); ISFLAG(Md, Static); ISFLAG(Md, Final); ISFLAG(Md, Virtual); ISFLAG(Md, 
HideBySig); ISFLAG(Md, ReuseSlot); ISFLAG(Md, NewSlot); ISFLAG(Md, Abstract); ISFLAG(Md, SpecialName); ISFLAG(Md, RTSpecialName); ISFLAG(Md, PinvokeImpl); ISFLAG(Md, UnmanagedExport); if (!*sFlags) strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]"); bool result = (((flags) & mdRTSpecialName) && !wcscmp((memberName), W(".ctor"))); if (result) strcat_s(sFlags, STRING_BUFFER_LEN, "[.ctor] "); result = (((flags) & mdRTSpecialName) && !wcscmp((memberName), W(".cctor"))); if (result) strcat_s(sFlags,STRING_BUFFER_LEN, "[.cctor] "); // "Reserved" flags ISFLAG(Md, HasSecurity); ISFLAG(Md, RequireSecObject); VWriteLine("\t\tFlags : %s (%08x)", sFlags, flags); VWriteLine("\t\tRVA : 0x%08x", ulCodeRVA); flags = ulImplFlags; sFlags[0] = 0; ISFLAG(Mi, Native); ISFLAG(Mi, IL); ISFLAG(Mi, OPTIL); ISFLAG(Mi, Runtime); ISFLAG(Mi, Unmanaged); ISFLAG(Mi, Managed); ISFLAG(Mi, ForwardRef); ISFLAG(Mi, PreserveSig); ISFLAG(Mi, InternalCall); ISFLAG(Mi, Synchronized); ISFLAG(Mi, NoInlining); if (!*sFlags) strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]"); VWriteLine("\t\tImplFlags : %s (%08x)", sFlags, flags); if (ulSigBlob) DisplaySignature(pbSigBlob, ulSigBlob, ""); else VWriteLine("\t\tERROR: no valid signature "); DisplayGenericParams(inMethod, "\t\t"); } // void MDInfo::DisplayMethodInfo() // displays the member information for the given field // void MDInfo::DisplayFieldInfo(mdFieldDef inField, DWORD *pdwFlags) { HRESULT hr; mdTypeDef memTypeDef; WCHAR memberName[STRING_BUFFER_LEN]; ULONG nameLen; DWORD flags; PCCOR_SIGNATURE pbSigBlob; ULONG ulSigBlob; DWORD dwCPlusTypeFlag; void const *pValue; ULONG cbValue; #ifdef FEATURE_COMINTEROP VARIANT defaultValue; ::VariantInit(&defaultValue); #endif hr = m_pImport->GetFieldProps( inField, &memTypeDef, memberName, STRING_BUFFER_LEN, &nameLen, &flags, &pbSigBlob, &ulSigBlob, &dwCPlusTypeFlag, &pValue, &cbValue); if (FAILED(hr)) Error("GetFieldProps failed.", hr); if (pdwFlags) *pdwFlags = flags; #ifdef FEATURE_COMINTEROP _FillVariant((BYTE)dwCPlusTypeFlag, pValue, cbValue, &defaultValue); #endif char sFlags[STRING_BUFFER_LEN]; sFlags[0] = 0; ISFLAG(Fd, Public); ISFLAG(Fd, Private); ISFLAG(Fd, Family); ISFLAG(Fd, Assembly); ISFLAG(Fd, FamANDAssem); ISFLAG(Fd, FamORAssem); ISFLAG(Fd, PrivateScope); ISFLAG(Fd, Static); ISFLAG(Fd, InitOnly); ISFLAG(Fd, Literal); ISFLAG(Fd, NotSerialized); ISFLAG(Fd, SpecialName); ISFLAG(Fd, RTSpecialName); ISFLAG(Fd, PinvokeImpl); // "Reserved" flags ISFLAG(Fd, HasDefault); if (!*sFlags) strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]"); VWriteLine("\t\tField Name: %ls (%8.8X)", memberName, inField); VWriteLine("\t\tFlags : %s (%08x)", sFlags, flags); #ifdef FEATURE_COMINTEROP if (IsFdHasDefault(flags)) VWriteLine("\tDefltValue: (%s) %ls", g_szMapElementType[dwCPlusTypeFlag], VariantAsString(&defaultValue)); #endif if (!ulSigBlob) // Signature size should be non-zero for fields VWriteLine("\t\tERROR: no valid signature "); else DisplaySignature(pbSigBlob, ulSigBlob, ""); #ifdef FEATURE_COMINTEROP ::VariantClear(&defaultValue); #endif } // void MDInfo::DisplayFieldInfo() // displays the RVA for the given global field. void MDInfo::DisplayFieldRVA(mdFieldDef inFieldDef) { HRESULT hr; ULONG ulRVA; hr = m_pImport->GetRVA(inFieldDef, &ulRVA, 0); if (FAILED(hr) && hr != CLDB_E_RECORD_NOTFOUND) Error("GetRVA failed.", hr); VWriteLine("\t\tRVA : 0x%08x", ulRVA); } // void MDInfo::DisplayFieldRVA() // displays information about every global function. 
void MDInfo::DisplayGlobalFunctions()
{
    WriteLine("Global functions");
    WriteLine("-------------------------------------------------------");
    DisplayMethods(mdTokenNil);
    WriteLine("");
} // void MDInfo::DisplayGlobalFunctions()

// displays information about every global field.
void MDInfo::DisplayGlobalFields()
{
    WriteLine("Global fields");
    WriteLine("-------------------------------------------------------");
    DisplayFields(mdTokenNil, NULL, 0);
    WriteLine("");
} // void MDInfo::DisplayGlobalFields()

// displays information about every global memberref.
void MDInfo::DisplayGlobalMemberRefs()
{
    WriteLine("Global MemberRefs");
    WriteLine("-------------------------------------------------------");
    DisplayMemberRefs(mdTokenNil, "");
    WriteLine("");
} // void MDInfo::DisplayGlobalMemberRefs()

// displays information about every method in a given typedef
//
void MDInfo::DisplayMethods(mdTypeDef inTypeDef)
{
    HCORENUM methodEnum = NULL;
    mdToken methods[ENUM_BUFFER_SIZE];
    DWORD flags;
    ULONG count, totalCount = 1;
    HRESULT hr;

    while (SUCCEEDED(hr = m_pImport->EnumMethods( &methodEnum, inTypeDef, methods, ARRAY_SIZE(methods), &count)) &&
            count > 0)
    {
        for (ULONG i = 0; i < count; i++, totalCount++)
        {
            VWriteLine("\tMethod #%d (%08x) %s", totalCount, methods[i], (methods[i] == g_tkEntryPoint) ? "[ENTRYPOINT]" : "");
            WriteLine("\t-------------------------------------------------------");
            DisplayMethodInfo(methods[i], &flags);
            DisplayParams(methods[i]);
            DisplayCustomAttributes(methods[i], "\t\t");
            DisplayPermissions(methods[i], "\t");
            DisplayMemberRefs(methods[i], "\t");

            // P-invoke data if present.
            if (IsMdPinvokeImpl(flags))
                DisplayPinvokeInfo(methods[i]);

            WriteLine("");
        }
    }
    m_pImport->CloseEnum( methodEnum);
} // void MDInfo::DisplayMethods()

// displays information about every field in a given typedef
//
void MDInfo::DisplayFields(mdTypeDef inTypeDef, COR_FIELD_OFFSET *rFieldOffset, ULONG cFieldOffset)
{
    HCORENUM fieldEnum = NULL;
    mdToken fields[ENUM_BUFFER_SIZE];
    ULONG count, totalCount = 1;
    DWORD flags;
    HRESULT hr;

    while (SUCCEEDED(hr = m_pImport->EnumFields( &fieldEnum, inTypeDef, fields, ARRAY_SIZE(fields), &count)) &&
            count > 0)
    {
        for (ULONG i = 0; i < count; i++, totalCount++)
        {
            VWriteLine("\tField #%d (%08x)",totalCount, fields[i]);
            WriteLine("\t-------------------------------------------------------");
            DisplayFieldInfo(fields[i], &flags);
            DisplayCustomAttributes(fields[i], "\t\t");
            DisplayPermissions(fields[i], "\t");
            DisplayFieldMarshal(fields[i]);

            // RVA if it's a global field.
            if (inTypeDef == mdTokenNil)
                DisplayFieldRVA(fields[i]);

            // P-invoke data if present.
            if (IsFdPinvokeImpl(flags))
                DisplayPinvokeInfo(fields[i]);

            // Display offset if present.
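            // The offsets in rFieldOffset come from GetClassLayout (see DisplayTypeDefInfo);
            // the lookup below matches the current fielddef on RID.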
if (cFieldOffset) { bool found = false; for (ULONG iLayout = 0; iLayout < cFieldOffset; ++iLayout) { if (RidFromToken(rFieldOffset[iLayout].ridOfField) == RidFromToken(fields[i])) { found = true; VWriteLine("\t\tOffset : 0x%08x", rFieldOffset[iLayout].ulOffset); break; } } _ASSERTE(found); } WriteLine(""); } } m_pImport->CloseEnum( fieldEnum); } // void MDInfo::DisplayFields() // displays information about every methodImpl in a given typedef // void MDInfo::DisplayMethodImpls(mdTypeDef inTypeDef) { HCORENUM methodImplEnum = NULL; mdMethodDef rtkMethodBody[ENUM_BUFFER_SIZE]; mdMethodDef rtkMethodDecl[ENUM_BUFFER_SIZE]; ULONG count, totalCount=1; HRESULT hr; while (SUCCEEDED(hr = m_pImport->EnumMethodImpls( &methodImplEnum, inTypeDef, rtkMethodBody, rtkMethodDecl, ARRAY_SIZE(rtkMethodBody), &count)) && count > 0) { for (ULONG i = 0; i < count; i++, totalCount++) { VWriteLine("\n\tMethodImpl #%d (%08x)", totalCount, totalCount); WriteLine("\t-------------------------------------------------------"); VWriteLine("\t\tMethod Body Token : 0x%08x", rtkMethodBody[i]); VWriteLine("\t\tMethod Declaration Token : 0x%08x", rtkMethodDecl[i]); WriteLine(""); } } m_pImport->CloseEnum( methodImplEnum); } // void MDInfo::DisplayMethodImpls() // displays information about the given parameter // void MDInfo::DisplayParamInfo(mdParamDef inParamDef) { mdMethodDef md; ULONG num; WCHAR paramName[STRING_BUFFER_LEN]; ULONG nameLen; DWORD flags; VARIANT defValue; DWORD dwCPlusFlags; void const *pValue; ULONG cbValue; #ifdef FEATURE_COMINTEROP ::VariantInit(&defValue); #endif HRESULT hr = m_pImport->GetParamProps( inParamDef, &md, &num, paramName, ARRAY_SIZE(paramName), &nameLen, &flags, &dwCPlusFlags, &pValue, &cbValue); if (FAILED(hr)) Error("GetParamProps failed.", hr); _FillVariant((BYTE)dwCPlusFlags, pValue, cbValue, &defValue); char sFlags[STRING_BUFFER_LEN]; sFlags[0] = 0; ISFLAG(Pd, In); ISFLAG(Pd, Out); ISFLAG(Pd, Optional); // "Reserved" flags. 
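    // (pdHasDefault and pdHasFieldMarshal sit under pdReservedMask and are set by the
    //  metadata engine rather than directly by callers.)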
ISFLAG(Pd, HasDefault); ISFLAG(Pd, HasFieldMarshal); if (!*sFlags) strcpy_s(sFlags,STRING_BUFFER_LEN, "[none]"); VWrite("\t\t\t(%ld) ParamToken : (%08x) Name : %ls flags: %s (%08x)", num, inParamDef, paramName, sFlags, flags); #ifdef FEATURE_COMINTEROP if (IsPdHasDefault(flags)) VWriteLine(" Default: (%s) %ls", g_szMapElementType[dwCPlusFlags], VariantAsString(&defValue)); else #endif VWriteLine(""); DisplayCustomAttributes(inParamDef, "\t\t\t"); #ifdef FEATURE_COMINTEROP ::VariantClear(&defValue); #endif } // void MDInfo::DisplayParamInfo() // displays all parameters for a given memberdef // void MDInfo::DisplayParams(mdMethodDef inMethodDef) { HCORENUM paramEnum = NULL; mdParamDef params[ENUM_BUFFER_SIZE]; ULONG count, paramCount; bool first = true; HRESULT hr; while (SUCCEEDED(hr = m_pImport->EnumParams( &paramEnum, inMethodDef, params, ARRAY_SIZE(params), &count)) && count > 0) { if (first) { m_pImport->CountEnum( paramEnum, &paramCount); VWriteLine("\t\t%d Parameters", paramCount); } for (ULONG i = 0; i < count; i++) { DisplayParamInfo(params[i]); DisplayFieldMarshal(params[i]); } first = false; } m_pImport->CloseEnum( paramEnum); } // void MDInfo::DisplayParams() void MDInfo::DisplayGenericParams(mdToken tk, const char *prefix) { HCORENUM paramEnum = NULL; mdParamDef params[ENUM_BUFFER_SIZE]; ULONG count, paramCount; bool first = true; HRESULT hr; while (SUCCEEDED(hr = m_pImport->EnumGenericParams( &paramEnum, tk, params, ARRAY_SIZE(params), &count)) && count > 0) { if (first) { m_pImport->CountEnum( paramEnum, &paramCount); VWriteLine("%s%d Generic Parameters", prefix, paramCount); } for (ULONG i = 0; i < count; i++) { DisplayGenericParamInfo(params[i], prefix); } first = false; } m_pImport->CloseEnum( paramEnum); } void MDInfo::DisplayGenericParamInfo(mdGenericParam tkParam, const char *prefix) { ULONG ulSeq; WCHAR paramName[STRING_BUFFER_LEN]; ULONG nameLen; DWORD flags; mdToken tkOwner; char newprefix[30]; HCORENUM constraintEnum = NULL; mdParamDef constraints[4]; ULONG count, constraintCount; mdToken constraint; mdToken owner; bool first = true; HRESULT hr = m_pImport->GetGenericParamProps(tkParam, &ulSeq, &flags, &tkOwner, NULL, paramName, ARRAY_SIZE(paramName), &nameLen); if (FAILED(hr)) Error("GetGenericParamProps failed.", hr); VWriteLine("%s\t(%ld) GenericParamToken : (%08x) Name : %ls flags: %08x Owner: %08x", prefix, ulSeq, tkParam, paramName, flags, tkOwner); // Any constraints for the GenericParam while (SUCCEEDED(hr = m_pImport->EnumGenericParamConstraints(&constraintEnum, tkParam, constraints, ARRAY_SIZE(constraints), &count)) && count > 0) { if (first) { m_pImport->CountEnum( constraintEnum, &constraintCount); VWriteLine("%s\t\t%d Constraint(s)", prefix, constraintCount); } VWrite("%s\t\t", prefix); for (ULONG i=0; i< count; ++i) { hr = m_pImport->GetGenericParamConstraintProps(constraints[i], &owner, &constraint); if (owner != tkParam) VWrite("%08x (owner: %08x) ", constraint, owner); else VWrite("%08x ", constraint); } VWriteLine(""); } m_pImport->CloseEnum(constraintEnum); sprintf_s(newprefix, 30, "%s\t", prefix); DisplayCustomAttributes(tkParam, newprefix); } LPCWSTR MDInfo::TokenName(mdToken inToken, _Out_writes_(bufLen) LPWSTR buffer, ULONG bufLen) { LPCUTF8 pName; // Token name in UTF8. 
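    // GetNameFromToken returns the UTF-8 name; it is widened into the caller's buffer below.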
if (IsNilToken(inToken)) return W(""); m_pImport->GetNameFromToken(inToken, &pName); WszMultiByteToWideChar(CP_UTF8,0, pName,-1, buffer,bufLen); return buffer; } // LPCWSTR MDInfo::TokenName() // prints out name of typeref or typedef // LPCWSTR MDInfo::TypeDeforRefName(mdToken inToken, _Out_writes_(bufLen) LPWSTR buffer, ULONG bufLen) { if (RidFromToken(inToken)) { if (TypeFromToken(inToken) == mdtTypeDef) return (TypeDefName((mdTypeDef) inToken, buffer, bufLen)); else if (TypeFromToken(inToken) == mdtTypeRef) return (TypeRefName((mdTypeRef) inToken, buffer, bufLen)); else if (TypeFromToken(inToken) == mdtTypeSpec) return W("[TypeSpec]"); else return W("[InvalidReference]"); } else return W(""); } // LPCWSTR MDInfo::TypeDeforRefName() LPCWSTR MDInfo::MemberDeforRefName(mdToken inToken, _Out_writes_(bufLen) LPWSTR buffer, ULONG bufLen) { if (RidFromToken(inToken)) { if (TypeFromToken(inToken) == mdtMethodDef || TypeFromToken(inToken) == mdtFieldDef) return (MemberName(inToken, buffer, bufLen)); else if (TypeFromToken(inToken) == mdtMemberRef) return (MemberRefName((mdMemberRef) inToken, buffer, bufLen)); else return W("[InvalidReference]"); } else return W(""); } // LPCWSTR MDInfo::MemberDeforRefName() // prints out only the name of the given typedef // // LPCWSTR MDInfo::TypeDefName(mdTypeDef inTypeDef, _Out_writes_(bufLen) LPWSTR buffer, ULONG bufLen) { HRESULT hr; hr = m_pImport->GetTypeDefProps( // [IN] The import scope. inTypeDef, // [IN] TypeDef token for inquiry. buffer, // [OUT] Put name here. bufLen, // [IN] size of name buffer in wide chars. NULL, // [OUT] put size of name (wide chars) here. NULL, // [OUT] Put flags here. NULL); // [OUT] Put base class TypeDef/TypeRef here. if (FAILED(hr)) { swprintf_s(buffer, bufLen, W("[Invalid TypeDef]")); } return buffer; } // LPCWSTR MDInfo::TypeDefName() // prints out all the properties of a given typedef // void MDInfo::DisplayTypeDefProps(mdTypeDef inTypeDef) { HRESULT hr; WCHAR typeDefName[STRING_BUFFER_LEN]; ULONG nameLen; DWORD flags; mdToken extends; ULONG dwPacking; // Packing size of class, if specified. ULONG dwSize; // Total size of class, if specified. hr = m_pImport->GetTypeDefProps( inTypeDef, // [IN] TypeDef token for inquiry. typeDefName, // [OUT] Put name here. STRING_BUFFER_LEN, // [IN] size of name buffer in wide chars. &nameLen, // [OUT] put size of name (wide chars) here. &flags, // [OUT] Put flags here. &extends); // [OUT] Put base class TypeDef/TypeRef here. 
if (FAILED(hr)) Error("GetTypeDefProps failed.", hr); char sFlags[STRING_BUFFER_LEN]; WCHAR szTempBuf[STRING_BUFFER_LEN]; VWriteLine("\tTypDefName: %ls (%8.8X)",typeDefName,inTypeDef); VWriteLine("\tFlags : %s (%08x)",ClassFlags(flags, sFlags), flags); VWriteLine("\tExtends : %8.8X [%s] %ls",extends,TokenTypeName(extends), TypeDeforRefName(extends, szTempBuf, ARRAY_SIZE(szTempBuf))); hr = m_pImport->GetClassLayout(inTypeDef, &dwPacking, 0,0,0, &dwSize); if (hr == S_OK) VWriteLine("\tLayout : Packing:%d, Size:%d", dwPacking, dwSize); if (IsTdNested(flags)) { mdTypeDef tkEnclosingClass; hr = m_pImport->GetNestedClassProps(inTypeDef, &tkEnclosingClass); if (hr == S_OK) { VWriteLine("\tEnclosingClass : %ls (%8.8X)", TypeDeforRefName(tkEnclosingClass, szTempBuf, ARRAY_SIZE(szTempBuf)), tkEnclosingClass); } else if (hr == CLDB_E_RECORD_NOTFOUND) WriteLine("ERROR: EnclosingClass not found for NestedClass"); else Error("GetNestedClassProps failed.", hr); } } // void MDInfo::DisplayTypeDefProps() // Prints out the name of the given TypeRef // LPCWSTR MDInfo::TypeRefName(mdTypeRef tr, _Out_writes_(bufLen) LPWSTR buffer, ULONG bufLen) { HRESULT hr; hr = m_pImport->GetTypeRefProps( tr, // The class ref token. NULL, // Resolution scope. buffer, // Put the name here. bufLen, // Size of the name buffer, wide chars. NULL); // Put actual size of name here. if (FAILED(hr)) { swprintf_s(buffer, bufLen, W("[Invalid TypeRef]")); } return (buffer); } // LPCWSTR MDInfo::TypeRefName() // Prints out all the info of the given TypeRef // void MDInfo::DisplayTypeRefInfo(mdTypeRef tr) { HRESULT hr; mdToken tkResolutionScope; WCHAR typeRefName[STRING_BUFFER_LEN]; ULONG nameLen; hr = m_pImport->GetTypeRefProps( tr, // The class ref token. &tkResolutionScope, // ResolutionScope. typeRefName, // Put the name here. STRING_BUFFER_LEN, // Size of the name buffer, wide chars. &nameLen); // Put actual size of name here. if (FAILED(hr)) Error("GetTypeRefProps failed.", hr); VWriteLine("Token: 0x%08x", tr); VWriteLine("ResolutionScope: 0x%08x", tkResolutionScope); VWriteLine("TypeRefName: %ls",typeRefName); DisplayCustomAttributes(tr, "\t"); } // void MDInfo::DisplayTypeRefInfo() void MDInfo::DisplayTypeSpecInfo(mdTypeSpec ts, const char *preFix) { HRESULT hr; PCCOR_SIGNATURE pvSig; ULONG cbSig; ULONG cb; InitSigBuffer(); hr = m_pImport->GetTypeSpecFromToken( ts, // The class ref token. &pvSig, &cbSig); if (FAILED(hr)) Error("GetTypeSpecFromToken failed.", hr); // DisplaySignature(pvSig, cbSig, preFix); if (FAILED(hr = GetOneElementType(pvSig, cbSig, &cb))) goto ErrExit; VWriteLine("%s\tTypeSpec :%s", preFix, (LPSTR)m_sigBuf.Ptr()); // Hex, too? if (m_DumpFilter & dumpMoreHex) { char rcNewPrefix[80]; sprintf_s(rcNewPrefix, 80, "%s\tSignature", preFix); DumpHex(rcNewPrefix, pvSig, cbSig, false, 24); } ErrExit: return; } // void MDInfo::DisplayTypeSpecInfo() void MDInfo::DisplayMethodSpecInfo(mdMethodSpec ms, const char *preFix) { HRESULT hr; PCCOR_SIGNATURE pvSig; ULONG cbSig; mdToken tk; InitSigBuffer(); hr = m_pImport->GetMethodSpecProps( ms, // The MethodSpec token &tk, // The MethodDef or MemberRef &pvSig, // Signature. &cbSig); // Size of signature. VWriteLine("%s\tParent : 0x%08x", preFix, tk); DisplaySignature(pvSig, cbSig, preFix); //ErrExit: return; } // void MDInfo::DisplayMethodSpecInfo() // Return the passed-in buffer filled with a string detailing the class flags // associated with the class. 
// char *MDInfo::ClassFlags(DWORD flags, _Out_writes_(STRING_BUFFER_LEN) char *sFlags) { sFlags[0] = 0; ISFLAG(Td, NotPublic); ISFLAG(Td, Public); ISFLAG(Td, NestedPublic); ISFLAG(Td, NestedPrivate); ISFLAG(Td, NestedFamily); ISFLAG(Td, NestedAssembly); ISFLAG(Td, NestedFamANDAssem); ISFLAG(Td, NestedFamORAssem); ISFLAG(Td, AutoLayout); ISFLAG(Td, SequentialLayout); ISFLAG(Td, ExplicitLayout); ISFLAG(Td, Class); ISFLAG(Td, Interface); ISFLAG(Td, Abstract); ISFLAG(Td, Sealed); ISFLAG(Td, SpecialName); ISFLAG(Td, Import); ISFLAG(Td, Serializable); ISFLAG(Td, AnsiClass); ISFLAG(Td, UnicodeClass); ISFLAG(Td, AutoClass); ISFLAG(Td, BeforeFieldInit); ISFLAG(Td, Forwarder); // "Reserved" flags ISFLAG(Td, RTSpecialName); ISFLAG(Td, HasSecurity); ISFLAG(Td, WindowsRuntime); if (!*sFlags) strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]"); return sFlags; } // char *MDInfo::ClassFlags() // prints out all info on the given typeDef, including all information that // is specific to a given typedef // void MDInfo::DisplayTypeDefInfo(mdTypeDef inTypeDef) { DisplayTypeDefProps(inTypeDef); // Get field layout information. HRESULT hr = NOERROR; COR_FIELD_OFFSET *rFieldOffset = NULL; ULONG cFieldOffset = 0; hr = m_pImport->GetClassLayout(inTypeDef, NULL, rFieldOffset, 0, &cFieldOffset, NULL); if (SUCCEEDED(hr) && cFieldOffset) { rFieldOffset = new COR_FIELD_OFFSET[cFieldOffset]; if (rFieldOffset == NULL) Error("_calloc failed.", E_OUTOFMEMORY); hr = m_pImport->GetClassLayout(inTypeDef, NULL, rFieldOffset, cFieldOffset, &cFieldOffset, NULL); if (FAILED(hr)) { delete [] rFieldOffset; Error("GetClassLayout() failed.", hr); } } //No reason to display members if we're displaying fields and methods separately DisplayGenericParams(inTypeDef, "\t"); DisplayFields(inTypeDef, rFieldOffset, cFieldOffset); delete [] rFieldOffset; DisplayMethods(inTypeDef); DisplayProperties(inTypeDef); DisplayEvents(inTypeDef); DisplayMethodImpls(inTypeDef); DisplayPermissions(inTypeDef, ""); DisplayInterfaceImpls(inTypeDef); DisplayCustomAttributes(inTypeDef, "\t"); } // void MDInfo::DisplayTypeDefInfo() // print out information about every the given typeDef's interfaceImpls // void MDInfo::DisplayInterfaceImpls(mdTypeDef inTypeDef) { HCORENUM interfaceImplEnum = NULL; mdTypeRef interfaceImpls[ENUM_BUFFER_SIZE]; ULONG count, totalCount = 1; HRESULT hr; while(SUCCEEDED(hr = m_pImport->EnumInterfaceImpls( &interfaceImplEnum, inTypeDef,interfaceImpls,ARRAY_SIZE(interfaceImpls), &count)) && count > 0) { for (ULONG i = 0; i < count; i++, totalCount++) { VWriteLine("\tInterfaceImpl #%d (%08x)", totalCount, interfaceImpls[i]); WriteLine("\t-------------------------------------------------------"); DisplayInterfaceImplInfo(interfaceImpls[i]); DisplayPermissions(interfaceImpls[i], "\t"); WriteLine(""); } } m_pImport->CloseEnum( interfaceImplEnum); } // void MDInfo::DisplayInterfaceImpls() // print the information for the given interface implementation // void MDInfo::DisplayInterfaceImplInfo(mdInterfaceImpl inImpl) { mdTypeDef typeDef; mdToken token; HRESULT hr; WCHAR szTempBuf[STRING_BUFFER_LEN]; hr = m_pImport->GetInterfaceImplProps( inImpl, &typeDef, &token); if (FAILED(hr)) Error("GetInterfaceImplProps failed.", hr); VWriteLine("\t\tClass : %ls",TypeDeforRefName(typeDef, szTempBuf, ARRAY_SIZE(szTempBuf))); VWriteLine("\t\tToken : %8.8X [%s] %ls",token,TokenTypeName(token), TypeDeforRefName(token, szTempBuf, ARRAY_SIZE(szTempBuf))); DisplayCustomAttributes(inImpl, "\t\t"); } // void MDInfo::DisplayInterfaceImplInfo() // displays the information 
for a particular property // void MDInfo::DisplayPropertyInfo(mdProperty inProp) { HRESULT hr; mdTypeDef typeDef; WCHAR propName[STRING_BUFFER_LEN]; DWORD flags; #ifdef FEATURE_COMINTEROP VARIANT defaultValue; #endif void const *pValue; ULONG cbValue; DWORD dwCPlusTypeFlag; mdMethodDef setter, getter, otherMethod[ENUM_BUFFER_SIZE]; ULONG others; PCCOR_SIGNATURE pbSigBlob; ULONG ulSigBlob; #ifdef FEATURE_COMINTEROP ::VariantInit(&defaultValue); #endif hr = m_pImport->GetPropertyProps( inProp, // [IN] property token &typeDef, // [OUT] typedef containing the property declarion. propName, // [OUT] Property name STRING_BUFFER_LEN, // [IN] the count of wchar of szProperty NULL, // [OUT] actual count of wchar for property name &flags, // [OUT] property flags. &pbSigBlob, // [OUT] Signature Blob. &ulSigBlob, // [OUT] Number of bytes in the signature blob. &dwCPlusTypeFlag, // [OUT] default value &pValue, &cbValue, &setter, // [OUT] setter method of the property &getter, // [OUT] getter method of the property otherMethod, // [OUT] other methods of the property ENUM_BUFFER_SIZE, // [IN] size of rmdOtherMethod &others); // [OUT] total number of other method of this property if (FAILED(hr)) Error("GetPropertyProps failed.", hr); VWriteLine("\t\tProp.Name : %ls (%8.8X)",propName,inProp); char sFlags[STRING_BUFFER_LEN]; sFlags[0] = 0; ISFLAG(Pr, SpecialName); ISFLAG(Pr, RTSpecialName); ISFLAG(Pr, HasDefault); if (!*sFlags) strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]"); VWriteLine("\t\tFlags : %s (%08x)", sFlags, flags); if (ulSigBlob) DisplaySignature(pbSigBlob, ulSigBlob, ""); else VWriteLine("\t\tERROR: no valid signature "); WCHAR szTempBuf[STRING_BUFFER_LEN]; #ifdef FEATURE_COMINTEROP _FillVariant((BYTE)dwCPlusTypeFlag, pValue, cbValue, &defaultValue); VWriteLine("\t\tDefltValue: %ls",VariantAsString(&defaultValue)); #endif VWriteLine("\t\tSetter : (%08x) %ls",setter,MemberDeforRefName(setter, szTempBuf, ARRAY_SIZE(szTempBuf))); VWriteLine("\t\tGetter : (%08x) %ls",getter,MemberDeforRefName(getter, szTempBuf, ARRAY_SIZE(szTempBuf))); // do something with others? VWriteLine("\t\t%ld Others",others); DisplayCustomAttributes(inProp, "\t\t"); #ifdef FEATURE_COMINTEROP ::VariantClear(&defaultValue); #endif } // void MDInfo::DisplayPropertyInfo() // displays info for each property // void MDInfo::DisplayProperties(mdTypeDef inTypeDef) { HCORENUM propEnum = NULL; mdProperty props[ENUM_BUFFER_SIZE]; ULONG count, totalCount = 1; HRESULT hr; while(SUCCEEDED(hr = m_pImport->EnumProperties( &propEnum, inTypeDef,props,ARRAY_SIZE(props), &count)) && count > 0) { for (ULONG i = 0; i < count; i++, totalCount++) { VWriteLine("\tProperty #%d (%08x)", totalCount, props[i]); WriteLine("\t-------------------------------------------------------"); DisplayPropertyInfo(props[i]); DisplayPermissions(props[i], "\t"); WriteLine(""); } } m_pImport->CloseEnum( propEnum); } // void MDInfo::DisplayProperties() // Display all information about a particular event // void MDInfo::DisplayEventInfo(mdEvent inEvent) { HRESULT hr; mdTypeDef typeDef; WCHAR eventName[STRING_BUFFER_LEN]; DWORD flags; mdToken eventType; mdMethodDef addOn, removeOn, fire, otherMethod[ENUM_BUFFER_SIZE]; ULONG totalOther; hr = m_pImport->GetEventProps( // [IN] The scope. inEvent, // [IN] event token &typeDef, // [OUT] typedef containing the event declarion. eventName, // [OUT] Event name STRING_BUFFER_LEN, // [IN] the count of wchar of szEvent NULL, // [OUT] actual count of wchar for event's name &flags, // [OUT] Event flags. 
&eventType, // [OUT] EventType class &addOn, // [OUT] AddOn method of the event &removeOn, // [OUT] RemoveOn method of the event &fire, // [OUT] Fire method of the event otherMethod, // [OUT] other method of the event ARRAY_SIZE(otherMethod), // [IN] size of rmdOtherMethod &totalOther); // [OUT] total number of other method of this event if (FAILED(hr)) Error("GetEventProps failed.", hr); VWriteLine("\t\tName : %ls (%8.8X)",eventName,inEvent); char sFlags[STRING_BUFFER_LEN]; sFlags[0] = 0; ISFLAG(Ev, SpecialName); ISFLAG(Ev, RTSpecialName); if (!*sFlags) strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]"); VWriteLine("\t\tFlags : %s (%08x)", sFlags, flags); WCHAR szTempBuf[STRING_BUFFER_LEN]; VWriteLine("\t\tEventType : %8.8X [%s]",eventType,TokenTypeName(eventType)); VWriteLine("\t\tAddOnMethd: (%08x) %ls",addOn,MemberDeforRefName(addOn, szTempBuf, ARRAY_SIZE(szTempBuf))); VWriteLine("\t\tRmvOnMethd: (%08x) %ls",removeOn,MemberDeforRefName(removeOn, szTempBuf, ARRAY_SIZE(szTempBuf))); VWriteLine("\t\tFireMethod: (%08x) %ls",fire,MemberDeforRefName(fire, szTempBuf, ARRAY_SIZE(szTempBuf))); VWriteLine("\t\t%ld OtherMethods",totalOther); DisplayCustomAttributes(inEvent, "\t\t"); } // void MDInfo::DisplayEventInfo() // Display information about all events in a typedef // void MDInfo::DisplayEvents(mdTypeDef inTypeDef) { HCORENUM eventEnum = NULL; mdProperty events[ENUM_BUFFER_SIZE]; ULONG count, totalCount = 1; HRESULT hr; while(SUCCEEDED(hr = m_pImport->EnumEvents( &eventEnum, inTypeDef,events,ARRAY_SIZE(events), &count)) && count > 0) { for (ULONG i = 0; i < count; i++, totalCount++) { VWriteLine("\tEvent #%d (%08x)", totalCount, events[i]); WriteLine("\t-------------------------------------------------------"); DisplayEventInfo(events[i]); DisplayPermissions(events[i], "\t"); WriteLine(""); } } m_pImport->CloseEnum( eventEnum); } // void MDInfo::DisplayEvents() // print info for the passed-in custom attribute // This function is used to print the custom attribute information for both TypeDefs and // MethodDefs which need slightly different formatting. preFix helps fix it up. // void MDInfo::DisplayCustomAttributeInfo(mdCustomAttribute inValue, const char *preFix) { const BYTE *pValue; // The custom value. ULONG cbValue; // Length of the custom value. HRESULT hr; // A result. mdToken tkObj; // Attributed object. mdToken tkType; // Type of the custom attribute. mdToken tk; // For name lookup. LPCUTF8 pMethName=0; // Name of custom attribute ctor, if any. CQuickBytes qSigName; // Buffer to pretty-print signature. PCCOR_SIGNATURE pSig=0; // Signature of ctor. ULONG cbSig; // Size of the signature. BOOL bCoffSymbol = false; // true for coff symbol CA's. WCHAR rcName[MAX_CLASS_NAME]; // Name of the type. hr = m_pImport->GetCustomAttributeProps( // S_OK or error. inValue, // The attribute. &tkObj, // The attributed object &tkType, // The attributes type. (const void**)&pValue, // Put pointer to data here. &cbValue); // Put size here. if (FAILED(hr)) Error("GetCustomAttributeProps failed.", hr); VWriteLine("%s\tCustomAttribute Type: %08x", preFix, tkType); // Get the name of the memberref or methoddef. tk = tkType; rcName[0] = L'\0'; // Get the member name, and the parent token. 
switch (TypeFromToken(tk)) { case mdtMemberRef: hr = m_pImport->GetNameFromToken(tk, &pMethName); if (FAILED(hr)) Error("GetNameFromToken failed.", hr); hr = m_pImport->GetMemberRefProps( tk, &tk, 0, 0, 0, &pSig, &cbSig); if (FAILED(hr)) Error("GetMemberRefProps failed.", hr); break; case mdtMethodDef: hr = m_pImport->GetNameFromToken(tk, &pMethName); if (FAILED(hr)) Error("GetNameFromToken failed.", hr); hr = m_pImport->GetMethodProps(tk, &tk, 0, 0, 0, 0, &pSig, &cbSig, 0, 0); if (FAILED(hr)) Error("GetMethodProps failed.", hr); break; } // switch // Get the type name. switch (TypeFromToken(tk)) { case mdtTypeDef: hr = m_pImport->GetTypeDefProps(tk, rcName,MAX_CLASS_NAME,0, 0,0); if (FAILED(hr)) Error("GetTypeDefProps failed.", hr); break; case mdtTypeRef: hr = m_pImport->GetTypeRefProps(tk, 0, rcName,MAX_CLASS_NAME,0); if (FAILED(hr)) Error("GetTypeRefProps failed.", hr); break; } // switch if (pSig && pMethName) { int iLen; LPWSTR pwzName = (LPWSTR)(new WCHAR[iLen= 1+(ULONG32)strlen(pMethName)]); if(pwzName) { WszMultiByteToWideChar(CP_UTF8,0, pMethName,-1, pwzName,iLen); PrettyPrintSigLegacy(pSig, cbSig, pwzName, &qSigName, m_pImport); delete [] pwzName; } } VWrite("%s\tCustomAttributeName: %ls", preFix, rcName); if (pSig && pMethName) VWrite(" :: %S", qSigName.Ptr()); // Keep track of coff overhead. if (!wcscmp(W("__DecoratedName"), rcName)) { bCoffSymbol = true; g_cbCoffNames += cbValue + 6; } WriteLine(""); VWriteLine("%s\tLength: %ld", preFix, cbValue); char newPreFix[40]; sprintf_s(newPreFix, 40, "%s\tValue ", preFix); DumpHex(newPreFix, pValue, cbValue); if (bCoffSymbol) VWriteLine("%s\t %s", preFix, pValue); // Try to decode the constructor blob. This is incomplete, but covers the most popular cases. if (pSig) { // Interpret the signature. PCCOR_SIGNATURE ps = pSig; ULONG cb; ULONG ulData; ULONG cParams; ULONG ulVal; UINT8 u1 = 0; UINT16 u2 = 0; UINT32 u4 = 0; UINT64 u8 = 0; unsigned __int64 uI64; double dblVal; ULONG cbVal; LPCUTF8 pStr; CustomAttributeParser CA(pValue, cbValue); CA.ValidateProlog(); // Get the calling convention. cb = CorSigUncompressData(ps, &ulData); ps += cb; // Get the count of params. cb = CorSigUncompressData(ps, &cParams); ps += cb; // Get the return value. cb = CorSigUncompressData(ps, &ulData); ps += cb; if (ulData == ELEMENT_TYPE_VOID) { VWrite("%s\tctor args: (", preFix); // For each param... for (ULONG i=0; i<cParams; ++i) { // Get the next param type. cb = CorSigUncompressData(ps, &ulData); ps += cb; if (i) Write(", "); DoObject: switch (ulData) { // For ET_OBJECT, the next byte in the blob is the ET of the actual data. case ELEMENT_TYPE_OBJECT: CA.GetU1(&u1); ulData = u1; goto DoObject; case ELEMENT_TYPE_I1: case ELEMENT_TYPE_U1: CA.GetU1(&u1); ulVal = u1; goto PrintVal; case ELEMENT_TYPE_I2: case ELEMENT_TYPE_U2: CA.GetU2(&u2); ulVal = u2; goto PrintVal; case ELEMENT_TYPE_I4: case ELEMENT_TYPE_U4: CA.GetU4(&u4); ulVal = u4; PrintVal: VWrite("%d", ulVal); break; case ELEMENT_TYPE_STRING: CA.GetString(&pStr, &cbVal); VWrite("\"%s\"", pStr); break; // The only class type that we accept is Type, which is stored as a string. case ELEMENT_TYPE_CLASS: // Eat the class type. cb = CorSigUncompressData(ps, &ulData); ps += cb; // Get the name of the type. 
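                // (the type argument is stored in the blob as a counted UTF-8 string)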
CA.GetString(&pStr, &cbVal); VWrite("typeof(%s)", pStr); break; case SERIALIZATION_TYPE_TYPE: CA.GetString(&pStr, &cbVal); VWrite("typeof(%s)", pStr); break; case ELEMENT_TYPE_I8: case ELEMENT_TYPE_U8: CA.GetU8(&u8); uI64 = u8; VWrite("%#lx", uI64); break; case ELEMENT_TYPE_R4: dblVal = CA.GetR4(); VWrite("%f", dblVal); break; case ELEMENT_TYPE_R8: dblVal = CA.GetR8(); VWrite("%f", dblVal); break; default: // bail... i = cParams; Write(" <can not decode> "); break; } } WriteLine(")"); } } WriteLine(""); } // void MDInfo::DisplayCustomAttributeInfo() // Print all custom values for the given token // This function is used to print the custom value information for all tokens. // which need slightly different formatting. preFix helps fix it up. // void MDInfo::DisplayCustomAttributes(mdToken inToken, const char *preFix) { HCORENUM customAttributeEnum = NULL; mdTypeRef customAttributes[ENUM_BUFFER_SIZE]; ULONG count, totalCount = 1; HRESULT hr; while(SUCCEEDED(hr = m_pImport->EnumCustomAttributes( &customAttributeEnum, inToken, 0, customAttributes, ARRAY_SIZE(customAttributes), &count)) && count > 0) { for (ULONG i = 0; i < count; i++, totalCount++) { VWriteLine("%sCustomAttribute #%d (%08x)", preFix, totalCount, customAttributes[i]); VWriteLine("%s-------------------------------------------------------", preFix); DisplayCustomAttributeInfo(customAttributes[i], preFix); } } m_pImport->CloseEnum( customAttributeEnum); } // void MDInfo::DisplayCustomAttributes() // Show the passed-in token's permissions // // void MDInfo::DisplayPermissions(mdToken tk, const char *preFix) { HCORENUM permissionEnum = NULL; mdPermission permissions[ENUM_BUFFER_SIZE]; ULONG count, totalCount = 1; HRESULT hr; while (SUCCEEDED(hr = m_pImport->EnumPermissionSets( &permissionEnum, tk, 0, permissions, ARRAY_SIZE(permissions), &count)) && count > 0) { for (ULONG i = 0; i < count; i++, totalCount++) { VWriteLine("%s\tPermission #%d (%08x)", preFix, totalCount, permissions[i]); VWriteLine("%s\t-------------------------------------------------------", preFix); DisplayPermissionInfo(permissions[i], preFix); WriteLine(""); } } m_pImport->CloseEnum( permissionEnum); } // void MDInfo::DisplayPermissions() // print properties of given rolecheck // // void MDInfo::DisplayPermissionInfo(mdPermission inPermission, const char *preFix) { DWORD dwAction; const BYTE *pvPermission; ULONG cbPermission; const char *flagDesc = NULL; char newPreFix[STRING_BUFFER_LEN]; HRESULT hr; hr = m_pImport->GetPermissionSetProps( inPermission, &dwAction, (const void**)&pvPermission, &cbPermission); if (FAILED(hr)) Error("GetPermissionSetProps failed.", hr); switch(dwAction) { case dclActionNil: flagDesc = "ActionNil"; break; case dclRequest: flagDesc = "Request"; break; case dclDemand: flagDesc = "Demand"; break; case dclAssert: flagDesc = "Assert"; break; case dclDeny: flagDesc = "Deny"; break; case dclPermitOnly: flagDesc = "PermitOnly"; break; case dclLinktimeCheck: flagDesc = "LinktimeCheck"; break; case dclInheritanceCheck: flagDesc = "InheritanceCheck"; break; case dclRequestMinimum: flagDesc = "RequestMinimum"; break; case dclRequestOptional: flagDesc = "RequestOptional"; break; case dclRequestRefuse: flagDesc = "RequestRefuse"; break; case dclPrejitGrant: flagDesc = "PrejitGrant"; break; case dclPrejitDenied: flagDesc = "PrejitDenied"; break; case dclNonCasDemand: flagDesc = "NonCasDemand"; break; case dclNonCasLinkDemand: flagDesc = "NonCasLinkDemand"; break; case dclNonCasInheritance: flagDesc = "NonCasInheritance"; break; } 
VWriteLine("%s\t\tAction : %s", preFix, flagDesc); VWriteLine("%s\t\tBlobLen : %d", preFix, cbPermission); if (cbPermission) { sprintf_s(newPreFix, STRING_BUFFER_LEN, "%s\tBlob", preFix); DumpHex(newPreFix, pvPermission, cbPermission, false, 24); } sprintf_s (newPreFix, STRING_BUFFER_LEN, "\t\t%s", preFix); DisplayCustomAttributes(inPermission, newPreFix); } // void MDInfo::DisplayPermissionInfo() // simply prints out the given GUID in standard form LPWSTR MDInfo::GUIDAsString(GUID inGuid, _Out_writes_(bufLen) LPWSTR guidString, ULONG bufLen) { StringFromGUID2(inGuid, guidString, bufLen); return guidString; } // LPWSTR MDInfo::GUIDAsString() #ifdef FEATURE_COMINTEROP LPCWSTR MDInfo::VariantAsString(VARIANT *pVariant) { HRESULT hr = S_OK; if (V_VT(pVariant) == VT_UNKNOWN) { _ASSERTE(V_UNKNOWN(pVariant) == NULL); return W("<NULL>"); } else if (SUCCEEDED(hr = ::VariantChangeType(pVariant, pVariant, 0, VT_BSTR))) return V_BSTR(pVariant); else if (hr == DISP_E_BADVARTYPE && V_VT(pVariant) == VT_I8) { // allocate the bstr. char szStr[32]; WCHAR wszStr[32]; // Set variant type to bstr. V_VT(pVariant) = VT_BSTR; // Create the ansi string. sprintf_s(szStr, 32, "%I64d", V_CY(pVariant).int64); // Convert to unicode. WszMultiByteToWideChar(CP_ACP, 0, szStr, -1, wszStr, 32); // convert to bstr and set variant value. V_BSTR(pVariant) = ::SysAllocString(wszStr); if (V_BSTR(pVariant) == NULL) Error("SysAllocString() failed.", E_OUTOFMEMORY); return V_BSTR(pVariant); } else return W("ERROR"); } // LPWSTR MDInfo::VariantAsString() #endif bool TrySigUncompress(PCCOR_SIGNATURE pData, // [IN] compressed data ULONG *pDataOut, // [OUT] the expanded *pData ULONG *cbCur) { ULONG ulSize = CorSigUncompressData(pData, pDataOut); if (ulSize == (ULONG)-1) { *cbCur = ulSize; return false; } else { *cbCur += ulSize; return true; } } void MDInfo::DisplayFieldMarshal(mdToken inToken) { PCCOR_SIGNATURE pvNativeType; // [OUT] native type of this field ULONG cbNativeType; // [OUT] the count of bytes of *ppvNativeType HRESULT hr; hr = m_pImport->GetFieldMarshal( inToken, &pvNativeType, &cbNativeType); if (FAILED(hr) && hr != CLDB_E_RECORD_NOTFOUND) Error("GetFieldMarshal failed.", hr); if (hr != CLDB_E_RECORD_NOTFOUND) { ULONG cbCur = 0; ULONG ulData; ULONG ulStrLoc; char szNTDesc[STRING_BUFFER_LEN]; while (cbCur < cbNativeType) { ulStrLoc = 0; ulData = NATIVE_TYPE_MAX; if (!TrySigUncompress(&pvNativeType[cbCur], &ulData, &cbCur)) continue; if (ulData >= sizeof(g_szNativeType)/sizeof(*g_szNativeType)) { cbCur = (ULONG)-1; continue; } ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "%s ", g_szNativeType[ulData]); switch (ulData) { case NATIVE_TYPE_FIXEDSYSSTRING: { if (cbCur < cbNativeType) { if (!TrySigUncompress(&pvNativeType[cbCur], &ulData, &cbCur)) continue; ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "{StringElementCount: %d} ",ulData); } } break; case NATIVE_TYPE_FIXEDARRAY: { if (cbCur < cbNativeType) { if (!TrySigUncompress(&pvNativeType[cbCur], &ulData, &cbCur)) continue; ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "{ArrayElementCount: %d",ulData); if (cbCur < cbNativeType) { if (!TrySigUncompress(&pvNativeType[cbCur], &ulData, &cbCur)) continue; ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, ", ArrayElementType(NT): %d",ulData); } ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc,"}"); } } break; case NATIVE_TYPE_ARRAY: { if (cbCur < cbNativeType) { BOOL bElemTypeSpecified; if 
(!TrySigUncompress(&pvNativeType[cbCur], &ulData, &cbCur)) continue; if (ulData != NATIVE_TYPE_MAX) { ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "{ArrayElementType(NT): %d", ulData); bElemTypeSpecified = TRUE; } else { ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "{"); bElemTypeSpecified = FALSE; } if (cbCur < cbNativeType) { if (bElemTypeSpecified) ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, ", "); if (!TrySigUncompress(&pvNativeType[cbCur], &ulData, &cbCur)) continue; ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "SizeParamIndex: %d",ulData); if (cbCur < cbNativeType) { if (!TrySigUncompress(&pvNativeType[cbCur], &ulData, &cbCur)) continue; ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, ", SizeParamMultiplier: %d",ulData); if (cbCur < cbNativeType) { if (!TrySigUncompress(&pvNativeType[cbCur], &ulData, &cbCur)) continue; ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, ", SizeConst: %d",ulData); } } } ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "}"); } } break; case NATIVE_TYPE_SAFEARRAY: { if (cbCur < cbNativeType) { if (!TrySigUncompress(&pvNativeType[cbCur], &ulData, &cbCur)) continue; ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "{SafeArraySubType(VT): %d, ",ulData); // Extract the element type name if it is specified. if (cbCur < cbNativeType) { LPUTF8 strTemp = NULL; int strLen = 0; int ByteCountLength = 0; strLen = CPackedLen::GetLength(&pvNativeType[cbCur], &ByteCountLength); cbCur += ByteCountLength; strTemp = (LPUTF8)(new char[strLen + 1]); if(strTemp) { memcpy(strTemp, (LPUTF8)&pvNativeType[cbCur], strLen); strTemp[strLen] = 0; ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "ElementTypeName: %s}", strTemp); cbCur += strLen; _ASSERTE(cbCur == cbNativeType); delete [] strTemp; } } else { ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "ElementTypeName: }"); } } } break; case NATIVE_TYPE_CUSTOMMARSHALER: { LPUTF8 strTemp = NULL; int strLen = 0; int ByteCountLength = 0; // Extract the typelib GUID. strLen = CPackedLen::GetLength(&pvNativeType[cbCur], &ByteCountLength); cbCur += ByteCountLength; strTemp = (LPUTF8)(new char[strLen + 1]); if(strTemp) { memcpy(strTemp, (LPUTF8)&pvNativeType[cbCur], strLen); strTemp[strLen] = 0; ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "{Typelib: %s, ", strTemp); cbCur += strLen; _ASSERTE(cbCur < cbNativeType); delete [] strTemp; } // Extract the name of the native type. strLen = CPackedLen::GetLength(&pvNativeType[cbCur], &ByteCountLength); cbCur += ByteCountLength; strTemp = (LPUTF8)(new char[strLen + 1]); if(strTemp) { memcpy(strTemp, (LPUTF8)&pvNativeType[cbCur], strLen); strTemp[strLen] = 0; ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "Native: %s, ", strTemp); cbCur += strLen; _ASSERTE(cbCur < cbNativeType); delete [] strTemp; } // Extract the name of the custom marshaler. strLen = CPackedLen::GetLength(&pvNativeType[cbCur], &ByteCountLength); cbCur += ByteCountLength; strTemp = (LPUTF8)(new char[strLen + 1]); if(strTemp) { memcpy(strTemp, (LPUTF8)&pvNativeType[cbCur], strLen); strTemp[strLen] = 0; ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "Marshaler: %s, ", strTemp); cbCur += strLen; _ASSERTE(cbCur < cbNativeType); delete [] strTemp; } // Extract the cookie string. 
strLen = CPackedLen::GetLength(&pvNativeType[cbCur], &ByteCountLength); cbCur += ByteCountLength; if (strLen > 0) { strTemp = (LPUTF8)(new char[strLen + 1]); if(strTemp) { memcpy(strTemp, (LPUTF8)&pvNativeType[cbCur], strLen); strTemp[strLen] = 0; ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "Cookie: "); // Copy the cookie string and transform the embedded nulls into \0's. for (int i = 0; i < strLen - 1; i++, cbCur++) { if (strTemp[i] == 0) ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "\\0"); else szNTDesc[ulStrLoc++] = strTemp[i]; } szNTDesc[ulStrLoc++] = strTemp[strLen - 1]; cbCur++; delete [] strTemp; } } else { ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "Cookie: "); } // Finish the custom marshaler native type description. ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "}"); _ASSERTE(cbCur <= cbNativeType); } break; default: { // normal nativetype element: do nothing } } VWriteLine("\t\t\t\t%s",szNTDesc); if (ulData >= NATIVE_TYPE_MAX) break; } if (cbCur == (ULONG)-1) { // There was something that we didn't grok in the signature. // Just dump out the blob as hex VWrite("\t\t\t\t{", szNTDesc); while (cbNativeType--) VWrite(" %2.2X", *pvNativeType++); VWriteLine(" }"); } } } // void MDInfo::DisplayFieldMarshal() void MDInfo::DisplayPinvokeInfo(mdToken inToken) { HRESULT hr = NOERROR; DWORD flags; WCHAR rcImport[512]; mdModuleRef tkModuleRef; char sFlags[STRING_BUFFER_LEN]; hr = m_pImport->GetPinvokeMap(inToken, &flags, rcImport, ARRAY_SIZE(rcImport), 0, &tkModuleRef); if (FAILED(hr)) { if (hr != CLDB_E_RECORD_NOTFOUND) VWriteLine("ERROR: GetPinvokeMap failed.", hr); return; } WriteLine("\t\tPinvoke Map Data:"); VWriteLine("\t\tEntry point: %S", rcImport); VWriteLine("\t\tModule ref: %08x", tkModuleRef); sFlags[0] = 0; ISFLAG(Pm, NoMangle); ISFLAG(Pm, CharSetNotSpec); ISFLAG(Pm, CharSetAnsi); ISFLAG(Pm, CharSetUnicode); ISFLAG(Pm, CharSetAuto); ISFLAG(Pm, SupportsLastError); ISFLAG(Pm, CallConvWinapi); ISFLAG(Pm, CallConvCdecl); ISFLAG(Pm, CallConvStdcall); ISFLAG(Pm, CallConvThiscall); ISFLAG(Pm, CallConvFastcall); ISFLAG(Pm, BestFitEnabled); ISFLAG(Pm, BestFitDisabled); ISFLAG(Pm, BestFitUseAssem); ISFLAG(Pm, ThrowOnUnmappableCharEnabled); ISFLAG(Pm, ThrowOnUnmappableCharDisabled); ISFLAG(Pm, ThrowOnUnmappableCharUseAssem); if (!*sFlags) strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]"); VWriteLine("\t\tMapping flags: %s (%08x)", sFlags, flags); } // void MDInfo::DisplayPinvokeInfo() ///////////////////////////////////////////////////////////////////////// // void DisplaySignature(PCCOR_SIGNATURE pbSigBlob, ULONG ulSigBlob); // // Display COM+ signature -- taken from cordump.cpp's DumpSignature ///////////////////////////////////////////////////////////////////////// void MDInfo::DisplaySignature(PCCOR_SIGNATURE pbSigBlob, ULONG ulSigBlob, const char *preFix) { ULONG cbCur = 0; ULONG cb; // 428793: Prefix complained correctly about unitialized data. 
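// DisplaySignature walks the blob in the ECMA-335 compressed form: a
// calling-convention byte first, then (for method signatures) an optional
// generic arity, an argument count, the return type, and one element type
// per argument; field and local-variable signatures take the shorter paths
// below. Every integer in the blob is read with the cor.h helper, e.g.
// (illustrative, "pSig" is not a local here):
//
//     ULONG ulValue;
//     ULONG cb = CorSigUncompressData(pSig, &ulValue);  // returns bytes consumed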
ULONG ulData = (ULONG) IMAGE_CEE_CS_CALLCONV_MAX; ULONG ulArgs; HRESULT hr = NOERROR; ULONG ulSigBlobStart = ulSigBlob; // initialize sigBuf InitSigBuffer(); cb = CorSigUncompressData(pbSigBlob, &ulData); VWriteLine("%s\t\tCallCnvntn: %s", preFix, (g_strCalling[ulData & IMAGE_CEE_CS_CALLCONV_MASK])); if (cb>ulSigBlob) goto ErrExit; cbCur += cb; ulSigBlob -= cb; if (ulData & IMAGE_CEE_CS_CALLCONV_HASTHIS) VWriteLine("%s\t\thasThis ", preFix); if (ulData & IMAGE_CEE_CS_CALLCONV_EXPLICITTHIS) VWriteLine("%s\t\texplicit ", preFix); if (ulData & IMAGE_CEE_CS_CALLCONV_GENERIC) VWriteLine("%s\t\tgeneric ", preFix); // initialize sigBuf InitSigBuffer(); if ( isCallConv(ulData,IMAGE_CEE_CS_CALLCONV_FIELD) ) { // display field type if (FAILED(hr = GetOneElementType(&pbSigBlob[cbCur], ulSigBlob, &cb))) goto ErrExit; VWriteLine("%s\t\tField type: %s", preFix, (LPSTR)m_sigBuf.Ptr()); if (cb>ulSigBlob) goto ErrExit; cbCur += cb; ulSigBlob -= cb; } else { if (ulData & IMAGE_CEE_CS_CALLCONV_GENERIC) { ULONG ulTyArgs; cb = CorSigUncompressData(&pbSigBlob[cbCur], &ulTyArgs); if (cb>ulSigBlob) goto ErrExit; cbCur += cb; ulSigBlob -= cb; VWriteLine("%s\t\tType Arity:%d ", preFix, ulTyArgs); } cb = CorSigUncompressData(&pbSigBlob[cbCur], &ulArgs); if (cb>ulSigBlob) goto ErrExit; cbCur += cb; ulSigBlob -= cb; if (ulData != IMAGE_CEE_CS_CALLCONV_LOCAL_SIG && ulData != IMAGE_CEE_CS_CALLCONV_GENERICINST) { // display return type when it is not a local varsig if (FAILED(hr = GetOneElementType(&pbSigBlob[cbCur], ulSigBlob, &cb))) goto ErrExit; VWriteLine("%s\t\tReturnType:%s", preFix, (LPSTR)m_sigBuf.Ptr()); if (cb>ulSigBlob) goto ErrExit; cbCur += cb; ulSigBlob -= cb; } // display count of argument // display arguments if (ulSigBlob) VWriteLine("%s\t\t%ld Arguments", preFix, ulArgs); else VWriteLine("%s\t\tNo arguments.", preFix); ULONG i = 0; while (i < ulArgs && ulSigBlob > 0) { ULONG ulDataTemp; // Handle the sentinal for varargs because it isn't counted in the args. CorSigUncompressData(&pbSigBlob[cbCur], &ulDataTemp); ++i; // initialize sigBuf InitSigBuffer(); if (FAILED(hr = GetOneElementType(&pbSigBlob[cbCur], ulSigBlob, &cb))) goto ErrExit; VWriteLine("%s\t\t\tArgument #%ld: %s",preFix, i, (LPSTR)m_sigBuf.Ptr()); if (cb>ulSigBlob) goto ErrExit; cbCur += cb; ulSigBlob -= cb; } } // Nothing consumed but not yet counted. cb = 0; ErrExit: // We should have consumed all signature blob. If not, dump the sig in hex. // Also dump in hex if so requested. if (m_DumpFilter & dumpMoreHex || ulSigBlob != 0) { // Did we not consume enough, or try to consume too much? if (cb > ulSigBlob) WriteLine("\tERROR IN SIGNATURE: Signature should be larger."); else if (cb < ulSigBlob) { VWrite("\tERROR IN SIGNATURE: Not all of signature blob was consumed. %d byte(s) remain", ulSigBlob); // If it is short, just append it to the end. if (ulSigBlob < 4) { Write(": "); for (; ulSigBlob; ++cbCur, --ulSigBlob) VWrite("%02x ", pbSigBlob[cbCur]); WriteLine(""); goto ErrExit2; } WriteLine(""); } // Any appropriate error message has been issued. Dump sig in hex, as determined // by error or command line switch. cbCur = 0; ulSigBlob = ulSigBlobStart; char rcNewPrefix[80]; sprintf_s(rcNewPrefix, 80, "%s\t\tSignature ", preFix); DumpHex(rcNewPrefix, pbSigBlob, ulSigBlob, false, 24); } ErrExit2: if (FAILED(hr)) Error("ERROR!! 
Bad signature blob value!"); return; } // void MDInfo::DisplaySignature() ///////////////////////////////////////////////////////////////////////// // HRESULT GetOneElementType(mdScope tkScope, BYTE *pbSigBlob, ULONG ulSigBlob, ULONG *pcb) // // Adds description of element type to the end of buffer -- caller must ensure // buffer is large enough. ///////////////////////////////////////////////////////////////////////// HRESULT MDInfo::GetOneElementType(PCCOR_SIGNATURE pbSigBlob, ULONG ulSigBlob, ULONG *pcb) { HRESULT hr = S_OK; // A result. ULONG cbCur = 0; ULONG cb; ULONG ulData = ELEMENT_TYPE_MAX; ULONG ulTemp; int iTemp = 0; mdToken tk; cb = CorSigUncompressData(pbSigBlob, &ulData); cbCur += cb; // Handle the modifiers. if (ulData & ELEMENT_TYPE_MODIFIER) { if (ulData == ELEMENT_TYPE_SENTINEL) IfFailGo(AddToSigBuffer("<ELEMENT_TYPE_SENTINEL>")); else if (ulData == ELEMENT_TYPE_PINNED) IfFailGo(AddToSigBuffer("PINNED")); else { hr = E_FAIL; goto ErrExit; } if (FAILED(GetOneElementType(&pbSigBlob[cbCur], ulSigBlob-cbCur, &cb))) goto ErrExit; cbCur += cb; goto ErrExit; } // Handle the underlying element types. if (ulData >= ELEMENT_TYPE_MAX) { hr = E_FAIL; goto ErrExit; } while (ulData == ELEMENT_TYPE_PTR || ulData == ELEMENT_TYPE_BYREF) { IfFailGo(AddToSigBuffer(" ")); IfFailGo(AddToSigBuffer(g_szMapElementType[ulData])); cb = CorSigUncompressData(&pbSigBlob[cbCur], &ulData); cbCur += cb; } IfFailGo(AddToSigBuffer(" ")); IfFailGo(AddToSigBuffer(g_szMapElementType[ulData])); if (CorIsPrimitiveType((CorElementType)ulData) || ulData == ELEMENT_TYPE_TYPEDBYREF || ulData == ELEMENT_TYPE_OBJECT || ulData == ELEMENT_TYPE_I || ulData == ELEMENT_TYPE_U) { // If this is a primitive type, we are done goto ErrExit; } if (ulData == ELEMENT_TYPE_VALUETYPE || ulData == ELEMENT_TYPE_CLASS || ulData == ELEMENT_TYPE_CMOD_REQD || ulData == ELEMENT_TYPE_CMOD_OPT) { cb = CorSigUncompressToken(&pbSigBlob[cbCur], &tk); cbCur += cb; // get the name of type ref. 
Don't care if truncated if (TypeFromToken(tk) == mdtTypeDef || TypeFromToken(tk) == mdtTypeRef) { sprintf_s(m_tempFormatBuffer, STRING_BUFFER_LEN, " %ls",TypeDeforRefName(tk, m_szTempBuf, ARRAY_SIZE(m_szTempBuf))); IfFailGo(AddToSigBuffer(m_tempFormatBuffer)); } else { _ASSERTE(TypeFromToken(tk) == mdtTypeSpec); sprintf_s(m_tempFormatBuffer, STRING_BUFFER_LEN, " %8x", tk); IfFailGo(AddToSigBuffer(m_tempFormatBuffer)); } if (ulData == ELEMENT_TYPE_CMOD_REQD || ulData == ELEMENT_TYPE_CMOD_OPT) { if (FAILED(GetOneElementType(&pbSigBlob[cbCur], ulSigBlob-cbCur, &cb))) goto ErrExit; cbCur += cb; } goto ErrExit; } if (ulData == ELEMENT_TYPE_SZARRAY) { // display the base type of SZARRAY if (FAILED(GetOneElementType(&pbSigBlob[cbCur], ulSigBlob-cbCur, &cb))) goto ErrExit; cbCur += cb; goto ErrExit; } // instantiated type if (ulData == ELEMENT_TYPE_GENERICINST) { // display the type constructor if (FAILED(GetOneElementType(&pbSigBlob[cbCur], ulSigBlob-cbCur, &cb))) goto ErrExit; cbCur += cb; ULONG numArgs; cb = CorSigUncompressData(&pbSigBlob[cbCur], &numArgs); cbCur += cb; IfFailGo(AddToSigBuffer("<")); while (numArgs > 0) { if (cbCur > ulSigBlob) goto ErrExit; if (FAILED(GetOneElementType(&pbSigBlob[cbCur], ulSigBlob-cbCur, &cb))) goto ErrExit; cbCur += cb; --numArgs; if (numArgs > 0) IfFailGo(AddToSigBuffer(",")); } IfFailGo(AddToSigBuffer(">")); goto ErrExit; } if (ulData == ELEMENT_TYPE_VAR) { ULONG index; cb = CorSigUncompressData(&pbSigBlob[cbCur], &index); cbCur += cb; sprintf_s(m_tempFormatBuffer, STRING_BUFFER_LEN, "!%d", index); IfFailGo(AddToSigBuffer(m_tempFormatBuffer)); goto ErrExit; } if (ulData == ELEMENT_TYPE_MVAR) { ULONG index; cb = CorSigUncompressData(&pbSigBlob[cbCur], &index); cbCur += cb; sprintf_s(m_tempFormatBuffer, STRING_BUFFER_LEN, "!!%d", index); IfFailGo(AddToSigBuffer(m_tempFormatBuffer)); goto ErrExit; } if (ulData == ELEMENT_TYPE_FNPTR) { cb = CorSigUncompressData(&pbSigBlob[cbCur], &ulData); cbCur += cb; if (ulData & IMAGE_CEE_CS_CALLCONV_EXPLICITTHIS) IfFailGo(AddToSigBuffer(" explicit")); if (ulData & IMAGE_CEE_CS_CALLCONV_HASTHIS) IfFailGo(AddToSigBuffer(" hasThis")); IfFailGo(AddToSigBuffer(" ")); IfFailGo(AddToSigBuffer(g_strCalling[ulData & IMAGE_CEE_CS_CALLCONV_MASK])); // Get number of args ULONG numArgs; cb = CorSigUncompressData(&pbSigBlob[cbCur], &numArgs); cbCur += cb; // do return type if (FAILED(GetOneElementType(&pbSigBlob[cbCur], ulSigBlob-cbCur, &cb))) goto ErrExit; cbCur += cb; IfFailGo(AddToSigBuffer("(")); while (numArgs > 0) { if (cbCur > ulSigBlob) goto ErrExit; if (FAILED(GetOneElementType(&pbSigBlob[cbCur], ulSigBlob-cbCur, &cb))) goto ErrExit; cbCur += cb; --numArgs; if (numArgs > 0) IfFailGo(AddToSigBuffer(",")); } IfFailGo(AddToSigBuffer(" )")); goto ErrExit; } if(ulData != ELEMENT_TYPE_ARRAY) return E_FAIL; // display the base type of SDARRAY if (FAILED(GetOneElementType(&pbSigBlob[cbCur], ulSigBlob-cbCur, &cb))) goto ErrExit; cbCur += cb; // display the rank of MDARRAY cb = CorSigUncompressData(&pbSigBlob[cbCur], &ulData); cbCur += cb; sprintf_s(m_tempFormatBuffer, STRING_BUFFER_LEN, " %d", ulData); IfFailGo(AddToSigBuffer(m_tempFormatBuffer)); if (ulData == 0) // we are done if no rank specified goto ErrExit; // how many dimensions have size specified? 
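// ECMA-335 ArrayShape encoding: after the element type and the rank read
// above come a count of specified sizes followed by each size, then a count
// of specified lower bounds followed by each signed bound. Illustratively, a
// rank-2 array with sizes 3 and 4 and no explicit lower bounds decodes as
// rank=2, numSizes=2, 3, 4, numLoBounds=0.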
cb = CorSigUncompressData(&pbSigBlob[cbCur], &ulData); cbCur += cb; sprintf_s(m_tempFormatBuffer, STRING_BUFFER_LEN, " %d", ulData); IfFailGo(AddToSigBuffer(m_tempFormatBuffer)); while (ulData) { cb = CorSigUncompressData(&pbSigBlob[cbCur], &ulTemp); sprintf_s(m_tempFormatBuffer, STRING_BUFFER_LEN, " %d", ulTemp); IfFailGo(AddToSigBuffer(m_tempFormatBuffer)); cbCur += cb; ulData--; } // how many dimensions have lower bounds specified? cb = CorSigUncompressData(&pbSigBlob[cbCur], &ulData); cbCur += cb; sprintf_s(m_tempFormatBuffer, STRING_BUFFER_LEN, " %d", ulData); IfFailGo(AddToSigBuffer(m_tempFormatBuffer)); while (ulData) { cb = CorSigUncompressSignedInt(&pbSigBlob[cbCur], &iTemp); sprintf_s(m_tempFormatBuffer, STRING_BUFFER_LEN, " %d", iTemp); IfFailGo(AddToSigBuffer(m_tempFormatBuffer)); cbCur += cb; ulData--; } ErrExit: if (cbCur > ulSigBlob) hr = E_FAIL; *pcb = cbCur; return hr; } // HRESULT MDInfo::GetOneElementType() // Display the fields of the N/Direct custom value structure. void MDInfo::DisplayCorNativeLink(COR_NATIVE_LINK *pCorNLnk, const char *preFix) { // Print the LinkType. const char *curField = "\tLink Type : "; switch(pCorNLnk->m_linkType) { case nltNone: VWriteLine("%s%s%s(%02x)", preFix, curField, "nltNone", pCorNLnk->m_linkType); break; case nltAnsi: VWriteLine("%s%s%s(%02x)", preFix, curField, "nltAnsi", pCorNLnk->m_linkType); break; case nltUnicode: VWriteLine("%s%s%s(%02x)", preFix, curField, "nltUnicode", pCorNLnk->m_linkType); break; case nltAuto: VWriteLine("%s%s%s(%02x)", preFix, curField, "nltAuto", pCorNLnk->m_linkType); break; default: _ASSERTE(!"Invalid Native Link Type!"); } // Print the link flags curField = "\tLink Flags : "; switch(pCorNLnk->m_flags) { case nlfNone: VWriteLine("%s%s%s(%02x)", preFix, curField, "nlfNone", pCorNLnk->m_flags); break; case nlfLastError: VWriteLine("%s%s%s(%02x)", preFix, curField, "nlfLastError", pCorNLnk->m_flags); break; default: _ASSERTE(!"Invalid Native Link Flags!"); } // Print the entry point. 
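// The entry point stored in a COR_NATIVE_LINK is a MemberRef token; it is
// resolved to a name through IMetaDataImport::GetMemberRefProps below and
// printed next to the raw token value.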
WCHAR memRefName[STRING_BUFFER_LEN]; HRESULT hr; hr = m_pImport->GetMemberRefProps( pCorNLnk->m_entryPoint, NULL, memRefName, STRING_BUFFER_LEN, NULL, NULL, NULL); if (FAILED(hr)) Error("GetMemberRefProps failed.", hr); VWriteLine("%s\tEntry Point : %ls (0x%08x)", preFix, memRefName, pCorNLnk->m_entryPoint); } // void MDInfo::DisplayCorNativeLink() // Fills given varaint with value given in pValue and of type in bCPlusTypeFlag // // Taken from MetaInternal.cpp HRESULT _FillVariant( BYTE bCPlusTypeFlag, const void *pValue, ULONG cbValue, VARIANT *pvar) { HRESULT hr = NOERROR; switch (bCPlusTypeFlag) { case ELEMENT_TYPE_BOOLEAN: V_VT(pvar) = VT_BOOL; V_BOOL(pvar) = *((BYTE*)pValue); //*((UNALIGNED VARIANT_BOOL *)pValue); break; case ELEMENT_TYPE_I1: V_VT(pvar) = VT_I1; V_I1(pvar) = *((CHAR*)pValue); break; case ELEMENT_TYPE_U1: V_VT(pvar) = VT_UI1; V_UI1(pvar) = *((BYTE*)pValue); break; case ELEMENT_TYPE_I2: V_VT(pvar) = VT_I2; V_I2(pvar) = GET_UNALIGNED_VAL16(pValue); break; case ELEMENT_TYPE_U2: case ELEMENT_TYPE_CHAR: V_VT(pvar) = VT_UI2; V_UI2(pvar) = GET_UNALIGNED_VAL16(pValue); break; case ELEMENT_TYPE_I4: V_VT(pvar) = VT_I4; V_I4(pvar) = GET_UNALIGNED_VAL32(pValue); break; case ELEMENT_TYPE_U4: V_VT(pvar) = VT_UI4; V_UI4(pvar) = GET_UNALIGNED_VAL32(pValue); break; case ELEMENT_TYPE_R4: { V_VT(pvar) = VT_R4; __int32 Value = GET_UNALIGNED_VAL32(pValue); V_R4(pvar) = (float &)Value; } break; case ELEMENT_TYPE_R8: { V_VT(pvar) = VT_R8; __int64 Value = GET_UNALIGNED_VAL64(pValue); V_R8(pvar) = (double &) Value; } break; case ELEMENT_TYPE_STRING: { V_VT(pvar) = VT_BSTR; WCHAR *TempString;; #if BIGENDIAN TempString = (WCHAR *)alloca(cbValue); memcpy(TempString, pValue, cbValue); SwapStringLength(TempString, cbValue/sizeof(WCHAR)); #else TempString = (WCHAR *)pValue; #endif // allocated bstr here V_BSTR(pvar) = ::SysAllocStringLen((LPWSTR)TempString, cbValue/sizeof(WCHAR)); if (V_BSTR(pvar) == NULL) hr = E_OUTOFMEMORY; } break; case ELEMENT_TYPE_CLASS: V_VT(pvar) = VT_UNKNOWN; V_UNKNOWN(pvar) = NULL; // _ASSERTE( GET_UNALIGNED_VAL32(pValue) == 0); break; case ELEMENT_TYPE_I8: V_VT(pvar) = VT_I8; V_CY(pvar).int64 = GET_UNALIGNED_VAL64(pValue); break; case ELEMENT_TYPE_U8: V_VT(pvar) = VT_UI8; V_CY(pvar).int64 = GET_UNALIGNED_VAL64(pValue); break; case ELEMENT_TYPE_VOID: V_VT(pvar) = VT_EMPTY; break; default: _ASSERTE(!"bad constant value type!"); } return hr; } // HRESULT _FillVariant() void MDInfo::DisplayAssembly() { if (m_pAssemblyImport) { DisplayAssemblyInfo(); DisplayAssemblyRefs(); DisplayFiles(); DisplayExportedTypes(); DisplayManifestResources(); } } // void MDInfo::DisplayAssembly() void MDInfo::DisplayAssemblyInfo() { HRESULT hr; mdAssembly mda; const BYTE *pbPublicKey; ULONG cbPublicKey; ULONG ulHashAlgId; WCHAR szName[STRING_BUFFER_LEN]; ASSEMBLYMETADATA MetaData; DWORD dwFlags; hr = m_pAssemblyImport->GetAssemblyFromScope(&mda); if (hr == CLDB_E_RECORD_NOTFOUND) return; else if (FAILED(hr)) Error("GetAssemblyFromScope() failed.", hr); // Get the required sizes for the arrays of locales, processors etc. ZeroMemory(&MetaData, sizeof(ASSEMBLYMETADATA)); hr = m_pAssemblyImport->GetAssemblyProps(mda, NULL, NULL, // Public Key. NULL, // Hash Algorithm. NULL, 0, NULL, // Name. &MetaData, NULL); // Flags. if (FAILED(hr)) Error("GetAssemblyProps() failed.", hr); // Allocate space for the arrays in the ASSEMBLYMETADATA structure. 
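// GetAssemblyProps is used with the usual size-then-fetch pattern: the call
// above (all output buffers NULL) only fills in the required counts inside
// ASSEMBLYMETADATA, the arrays are allocated below, and the call is repeated
// to fetch the locale/processor/OS data. A minimal sketch ("pAsmImport" is
// an illustrative name):
//
//     ASSEMBLYMETADATA md;
//     ZeroMemory(&md, sizeof(md));
//     pAsmImport->GetAssemblyProps(mda, NULL, NULL, NULL, NULL, 0, NULL, &md, NULL);
//     if (md.cbLocale) md.szLocale = new WCHAR[md.cbLocale];
//     // ...allocate rProcessor/rOS the same way, then call GetAssemblyProps again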
if (MetaData.cbLocale) MetaData.szLocale = new WCHAR[MetaData.cbLocale]; if (MetaData.ulProcessor) MetaData.rProcessor = new DWORD[MetaData.ulProcessor]; if (MetaData.ulOS) MetaData.rOS = new OSINFO[MetaData.ulOS]; hr = m_pAssemblyImport->GetAssemblyProps(mda, (const void **)&pbPublicKey, &cbPublicKey, &ulHashAlgId, szName, STRING_BUFFER_LEN, NULL, &MetaData, &dwFlags); if (FAILED(hr)) Error("GetAssemblyProps() failed.", hr); WriteLine("Assembly"); WriteLine("-------------------------------------------------------"); VWriteLine("\tToken: 0x%08x", mda); VWriteLine("\tName : %ls", szName); DumpHex("\tPublic Key ", pbPublicKey, cbPublicKey, false, 24); VWriteLine("\tHash Algorithm : 0x%08x", ulHashAlgId); DisplayASSEMBLYMETADATA(&MetaData); if(MetaData.szLocale) delete [] MetaData.szLocale; if(MetaData.rProcessor) delete [] MetaData.rProcessor; if(MetaData.rOS) delete [] MetaData.rOS; char sFlags[STRING_BUFFER_LEN]; DWORD flags = dwFlags; sFlags[0] = 0; ISFLAG(Af, PublicKey); ISFLAG(Af, Retargetable); ISFLAG(AfContentType_, WindowsRuntime); if (!*sFlags) strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]"); VWriteLine("\tFlags : %s (%08x)", sFlags, dwFlags); DisplayCustomAttributes(mda, "\t"); DisplayPermissions(mda, "\t"); WriteLine(""); } // void MDInfo::DisplayAssemblyInfo() void MDInfo::DisplayAssemblyRefs() { HCORENUM assemblyRefEnum = NULL; mdAssemblyRef AssemblyRefs[ENUM_BUFFER_SIZE]; ULONG count; ULONG totalCount = 1; HRESULT hr; while (SUCCEEDED(hr = m_pAssemblyImport->EnumAssemblyRefs( &assemblyRefEnum, AssemblyRefs, ARRAY_SIZE(AssemblyRefs), &count)) && count > 0) { for (ULONG i = 0; i < count; i++, totalCount++) { VWriteLine("AssemblyRef #%d (%08x)", totalCount, AssemblyRefs[i]); WriteLine("-------------------------------------------------------"); DisplayAssemblyRefInfo(AssemblyRefs[i]); WriteLine(""); } } m_pAssemblyImport->CloseEnum(assemblyRefEnum); } // void MDInfo::DisplayAssemblyRefs() void MDInfo::DisplayAssemblyRefInfo(mdAssemblyRef inAssemblyRef) { HRESULT hr; const BYTE *pbPublicKeyOrToken; ULONG cbPublicKeyOrToken; WCHAR szName[STRING_BUFFER_LEN]; ASSEMBLYMETADATA MetaData; const BYTE *pbHashValue; ULONG cbHashValue; DWORD dwFlags; VWriteLine("\tToken: 0x%08x", inAssemblyRef); // Get sizes for the arrays in the ASSEMBLYMETADATA structure. ZeroMemory(&MetaData, sizeof(ASSEMBLYMETADATA)); hr = m_pAssemblyImport->GetAssemblyRefProps(inAssemblyRef, NULL, NULL, // Public Key or Token. NULL, 0, NULL, // Name. &MetaData, NULL, NULL, // HashValue. NULL); // Flags. if (FAILED(hr)) Error("GetAssemblyRefProps() failed.", hr); // Allocate space for the arrays in the ASSEMBLYMETADATA structure. 
if (MetaData.cbLocale) MetaData.szLocale = new WCHAR[MetaData.cbLocale]; if (MetaData.ulProcessor) MetaData.rProcessor = new DWORD[MetaData.ulProcessor]; if (MetaData.ulOS) MetaData.rOS = new OSINFO[MetaData.ulOS]; hr = m_pAssemblyImport->GetAssemblyRefProps(inAssemblyRef, (const void **)&pbPublicKeyOrToken, &cbPublicKeyOrToken, szName, STRING_BUFFER_LEN, NULL, &MetaData, (const void **)&pbHashValue, &cbHashValue, &dwFlags); if (FAILED(hr)) Error("GetAssemblyRefProps() failed.", hr); DumpHex("\tPublic Key or Token", pbPublicKeyOrToken, cbPublicKeyOrToken, false, 24); VWriteLine("\tName: %ls", szName); DisplayASSEMBLYMETADATA(&MetaData); if(MetaData.szLocale) delete [] MetaData.szLocale; if(MetaData.rProcessor) delete [] MetaData.rProcessor; if(MetaData.rOS) delete [] MetaData.rOS; DumpHex("\tHashValue Blob", pbHashValue, cbHashValue, false, 24); char sFlags[STRING_BUFFER_LEN]; DWORD flags = dwFlags; sFlags[0] = 0; ISFLAG(Af, PublicKey); ISFLAG(Af, Retargetable); ISFLAG(AfContentType_, WindowsRuntime); #if 0 ISFLAG(Af, LegacyLibrary); ISFLAG(Af, LegacyPlatform); ISFLAG(Af, Library); ISFLAG(Af, Platform); #endif if (!*sFlags) strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]"); VWriteLine("\tFlags: %s (%08x)", sFlags, dwFlags); DisplayCustomAttributes(inAssemblyRef, "\t"); WriteLine(""); } // void MDInfo::DisplayAssemblyRefInfo() void MDInfo::DisplayFiles() { HCORENUM fileEnum = NULL; mdFile Files[ENUM_BUFFER_SIZE]; ULONG count; ULONG totalCount = 1; HRESULT hr; while (SUCCEEDED(hr = m_pAssemblyImport->EnumFiles( &fileEnum, Files, ARRAY_SIZE(Files), &count)) && count > 0) { for (ULONG i = 0; i < count; i++, totalCount++) { VWriteLine("File #%d (%08x)", totalCount, Files[i]); WriteLine("-------------------------------------------------------"); DisplayFileInfo(Files[i]); WriteLine(""); } } m_pAssemblyImport->CloseEnum(fileEnum); } // void MDInfo::DisplayFiles() void MDInfo::DisplayFileInfo(mdFile inFile) { HRESULT hr; WCHAR szName[STRING_BUFFER_LEN]; const BYTE *pbHashValue; ULONG cbHashValue; DWORD dwFlags; VWriteLine("\tToken: 0x%08x", inFile); hr = m_pAssemblyImport->GetFileProps(inFile, szName, STRING_BUFFER_LEN, NULL, (const void **)&pbHashValue, &cbHashValue, &dwFlags); if (FAILED(hr)) Error("GetFileProps() failed.", hr); VWriteLine("\tName : %ls", szName); DumpHex("\tHashValue Blob ", pbHashValue, cbHashValue, false, 24); char sFlags[STRING_BUFFER_LEN]; DWORD flags = dwFlags; sFlags[0] = 0; ISFLAG(Ff, ContainsMetaData); ISFLAG(Ff, ContainsNoMetaData); if (!*sFlags) strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]"); VWriteLine("\tFlags : %s (%08x)", sFlags, dwFlags); DisplayCustomAttributes(inFile, "\t"); WriteLine(""); } // MDInfo::DisplayFileInfo() void MDInfo::DisplayExportedTypes() { HCORENUM comTypeEnum = NULL; mdExportedType ExportedTypes[ENUM_BUFFER_SIZE]; ULONG count; ULONG totalCount = 1; HRESULT hr; while (SUCCEEDED(hr = m_pAssemblyImport->EnumExportedTypes( &comTypeEnum, ExportedTypes, ARRAY_SIZE(ExportedTypes), &count)) && count > 0) { for (ULONG i = 0; i < count; i++, totalCount++) { VWriteLine("ExportedType #%d (%08x)", totalCount, ExportedTypes[i]); WriteLine("-------------------------------------------------------"); DisplayExportedTypeInfo(ExportedTypes[i]); WriteLine(""); } } m_pAssemblyImport->CloseEnum(comTypeEnum); } // void MDInfo::DisplayExportedTypes() void MDInfo::DisplayExportedTypeInfo(mdExportedType inExportedType) { HRESULT hr; WCHAR szName[STRING_BUFFER_LEN]; mdToken tkImplementation; mdTypeDef tkTypeDef; DWORD dwFlags; char sFlags[STRING_BUFFER_LEN]; 
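// sFlags is scratch space for the textual form of the exported type's flag
// bits; ClassFlags (a helper defined elsewhere in this dumper, presumably
// built on the same ISFLAG pattern as the other Display* routines) fills it,
// and the result is printed next to the raw hex value below.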
VWriteLine("\tToken: 0x%08x", inExportedType); hr = m_pAssemblyImport->GetExportedTypeProps(inExportedType, szName, STRING_BUFFER_LEN, NULL, &tkImplementation, &tkTypeDef, &dwFlags); if (FAILED(hr)) Error("GetExportedTypeProps() failed.", hr); VWriteLine("\tName: %ls", szName); VWriteLine("\tImplementation token: 0x%08x", tkImplementation); VWriteLine("\tTypeDef token: 0x%08x", tkTypeDef); VWriteLine("\tFlags : %s (%08x)",ClassFlags(dwFlags, sFlags), dwFlags); DisplayCustomAttributes(inExportedType, "\t"); WriteLine(""); } // void MDInfo::DisplayExportedTypeInfo() void MDInfo::DisplayManifestResources() { HCORENUM manifestResourceEnum = NULL; mdManifestResource ManifestResources[ENUM_BUFFER_SIZE]; ULONG count; ULONG totalCount = 1; HRESULT hr; while (SUCCEEDED(hr = m_pAssemblyImport->EnumManifestResources( &manifestResourceEnum, ManifestResources, ARRAY_SIZE(ManifestResources), &count)) && count > 0) { for (ULONG i = 0; i < count; i++, totalCount++) { VWriteLine("ManifestResource #%d (%08x)", totalCount, ManifestResources[i]); WriteLine("-------------------------------------------------------"); DisplayManifestResourceInfo(ManifestResources[i]); WriteLine(""); } } m_pAssemblyImport->CloseEnum(manifestResourceEnum); } // void MDInfo::DisplayManifestResources() void MDInfo::DisplayManifestResourceInfo(mdManifestResource inManifestResource) { HRESULT hr; WCHAR szName[STRING_BUFFER_LEN]; mdToken tkImplementation; DWORD dwOffset; DWORD dwFlags; VWriteLine("\tToken: 0x%08x", inManifestResource); hr = m_pAssemblyImport->GetManifestResourceProps(inManifestResource, szName, STRING_BUFFER_LEN, NULL, &tkImplementation, &dwOffset, &dwFlags); if (FAILED(hr)) Error("GetManifestResourceProps() failed.", hr); VWriteLine("Name: %ls", szName); VWriteLine("Implementation token: 0x%08x", tkImplementation); VWriteLine("Offset: 0x%08x", dwOffset); char sFlags[STRING_BUFFER_LEN]; DWORD flags = dwFlags; sFlags[0] = 0; ISFLAG(Mr, Public); ISFLAG(Mr, Private); if (!*sFlags) strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]"); VWriteLine("\tFlags: %s (%08x)", sFlags, dwFlags); DisplayCustomAttributes(inManifestResource, "\t"); WriteLine(""); } // void MDInfo::DisplayManifestResourceInfo() void MDInfo::DisplayASSEMBLYMETADATA(ASSEMBLYMETADATA *pMetaData) { ULONG i; VWriteLine("\tVersion: %d.%d.%d.%d", pMetaData->usMajorVersion, pMetaData->usMinorVersion, pMetaData->usBuildNumber, pMetaData->usRevisionNumber); VWriteLine("\tMajor Version: 0x%08x", pMetaData->usMajorVersion); VWriteLine("\tMinor Version: 0x%08x", pMetaData->usMinorVersion); VWriteLine("\tBuild Number: 0x%08x", pMetaData->usBuildNumber); VWriteLine("\tRevision Number: 0x%08x", pMetaData->usRevisionNumber); VWriteLine("\tLocale: %ls", pMetaData->cbLocale ? pMetaData->szLocale : W("<null>")); for (i = 0; i < pMetaData->ulProcessor; i++) VWriteLine("\tProcessor #%ld: 0x%08x", i+1, pMetaData->rProcessor[i]); for (i = 0; i < pMetaData->ulOS; i++) { VWriteLine("\tOS #%ld:", i+1); VWriteLine("\t\tOS Platform ID: 0x%08x", pMetaData->rOS[i].dwOSPlatformId); VWriteLine("\t\tOS Major Version: 0x%08x", pMetaData->rOS[i].dwOSMajorVersion); VWriteLine("\t\tOS Minor Version: 0x%08x", pMetaData->rOS[i].dwOSMinorVersion); } } // void MDInfo::DisplayASSEMBLYMETADATA() void MDInfo::DisplayUserStrings() { HCORENUM stringEnum = NULL; // string enumerator. mdString Strings[ENUM_BUFFER_SIZE]; // String tokens from enumerator. CQuickArray<WCHAR> rUserString; // Buffer to receive string. WCHAR *szUserString; // Working pointer into buffer. ULONG chUserString; // Size of user string. 
CQuickArray<char> rcBuf; // Buffer to hold the BLOB version of the string. char *szBuf; // Working pointer into buffer. ULONG chBuf; // Saved size of the user string. ULONG count; // Items returned from enumerator. ULONG totalCount = 1; // Running count of strings. bool bUnprint = false; // Is an unprintable character found? HRESULT hr; // A result. while (SUCCEEDED(hr = m_pImport->EnumUserStrings( &stringEnum, Strings, ARRAY_SIZE(Strings), &count)) && count > 0) { if (totalCount == 1) { // If only one, it is the NULL string, so don't print it. WriteLine("User Strings"); WriteLine("-------------------------------------------------------"); } for (ULONG i = 0; i < count; i++, totalCount++) { do { // Try to get the string into the existing buffer. hr = m_pImport->GetUserString( Strings[i], rUserString.Ptr(),(ULONG32)rUserString.MaxSize(), &chUserString); if (hr == CLDB_S_TRUNCATION) { // Buffer wasn't big enough, try to enlarge it. if (FAILED(rUserString.ReSizeNoThrow(chUserString))) Error("malloc failed.", E_OUTOFMEMORY); continue; } } while (hr == CLDB_S_TRUNCATION); if (FAILED(hr)) Error("GetUserString failed.", hr); szUserString = rUserString.Ptr(); chBuf = chUserString; VWrite("%08x : (%2d) L\"", Strings[i], chUserString); for (ULONG j=0; j<chUserString; j++) { switch (*szUserString) { case 0: Write("\\0"); break; case L'\r': Write("\\r"); break; case L'\n': Write("\\n"); break; case L'\t': Write("\\t"); break; default: if (iswprint(*szUserString)) VWrite("%lc", *szUserString); else { bUnprint = true; Write("."); } break; } ++szUserString; if((j>0)&&((j&0x7F)==0)) WriteLine(""); } WriteLine("\""); // Print the user string as a blob if an unprintable character is found. if (bUnprint) { bUnprint = false; szUserString = rUserString.Ptr(); if (FAILED(hr = rcBuf.ReSizeNoThrow(81))) //(chBuf * 5 + 1); Error("ReSize failed.", hr); szBuf = rcBuf.Ptr(); ULONG j,k; WriteLine("\t\tUser string has unprintables, hex format below:"); for (j = 0,k=0; j < chBuf; j++) { sprintf_s (&szBuf[k*5], 81, "%04x ", szUserString[j]); k++; if((k==16)||(j == (chBuf-1))) { szBuf[k*5] = '\0'; VWriteLine("\t\t%s", szBuf); k=0; } } } } } if (stringEnum) m_pImport->CloseEnum(stringEnum); } // void MDInfo::DisplayUserStrings() void MDInfo::DisplayUnsatInfo() { HRESULT hr = S_OK; HCORENUM henum = 0; mdToken tk; ULONG cMethods; Write("\nUnresolved Externals\n"); Write("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n"); while ( (hr = m_pImport->EnumUnresolvedMethods( &henum, &tk, 1, &cMethods)) == S_OK && cMethods ) { if ( TypeFromToken(tk) == mdtMethodDef ) { // a method definition without implementation DisplayMethodInfo( tk ); } else if ( TypeFromToken(tk) == mdtMemberRef ) { // an unresolved MemberRef to a global function DisplayMemberRefInfo( tk, "" ); } else { _ASSERTE(!"Unknown token kind!"); } } m_pImport->CloseEnum(henum); } // void MDInfo::DisplayUnsatInfo() //******************************************************************************* // This code is used for debugging purposes only. This will just print out the // entire database. //******************************************************************************* const char *MDInfo::DumpRawNameOfType(ULONG iType) { if (iType <= iRidMax) { const char *pNameTable; m_pTables->GetTableInfo(iType, 0,0,0,0, &pNameTable); return pNameTable; } else // Is the field a coded token? 
if (iType <= iCodedTokenMax) { int iCdTkn = iType - iCodedToken; const char *pNameCdTkn; m_pTables->GetCodedTokenInfo(iCdTkn, 0,0, &pNameCdTkn); return pNameCdTkn; } // Fixed type. switch (iType) { case iBYTE: return "BYTE"; case iSHORT: return "short"; case iUSHORT: return "USHORT"; case iLONG: return "long"; case iULONG: return "ULONG"; case iSTRING: return "string"; case iGUID: return "GUID"; case iBLOB: return "blob"; } // default: static char buf[30]; sprintf_s(buf, 30, "unknown type 0x%02x", iType); return buf; } // const char *MDInfo::DumpRawNameOfType() void MDInfo::DumpRawCol(ULONG ixTbl, ULONG ixCol, ULONG rid, bool bStats) { ULONG ulType; // Type of a column. ULONG ulVal; // Value of a column. LPCUTF8 pString; // Pointer to a string. const void *pBlob; // Pointer to a blob. ULONG cb; // Size of something. m_pTables->GetColumn(ixTbl, ixCol, rid, &ulVal); m_pTables->GetColumnInfo(ixTbl, ixCol, 0, 0, &ulType, 0); if (ulType <= iRidMax) { const char *pNameTable; m_pTables->GetTableInfo(ulType, 0,0,0,0, &pNameTable); VWrite("%s[%x]", pNameTable, ulVal); } else // Is the field a coded token? if (ulType <= iCodedTokenMax) { int iCdTkn = ulType - iCodedToken; const char *pNameCdTkn; m_pTables->GetCodedTokenInfo(iCdTkn, 0,0, &pNameCdTkn); VWrite("%s[%08x]", pNameCdTkn, ulVal); } else { // Fixed type. switch (ulType) { case iBYTE: VWrite("%02x", ulVal); break; case iSHORT: case iUSHORT: VWrite("%04x", ulVal); break; case iLONG: case iULONG: VWrite("%08x", ulVal); break; case iSTRING: if (ulVal && (m_DumpFilter & dumpNames)) { m_pTables->GetString(ulVal, &pString); VWrite("(%x)\"%s\"", ulVal, pString); } else VWrite("string#%x", ulVal); if (bStats && ulVal) { m_pTables->GetString(ulVal, &pString); cb = (ULONG) strlen(pString) + 1; VWrite("(%d)", cb); } break; case iGUID: VWrite("guid#%x", ulVal); if (bStats && ulVal) { VWrite("(16)"); } break; case iBLOB: VWrite("blob#%x", ulVal); if (bStats && ulVal) { m_pTables->GetBlob(ulVal, &cb, &pBlob); cb += 1; if (cb > 128) cb += 1; if (cb > 16535) cb += 1; VWrite("(%d)", cb); } break; default: VWrite("unknown type 0x%04x", ulVal); break; } } } // void MDInfo::DumpRawCol() ULONG MDInfo::DumpRawColStats(ULONG ixTbl, ULONG ixCol, ULONG cRows) { ULONG rslt = 0; ULONG ulType; // Type of a column. ULONG ulVal; // Value of a column. LPCUTF8 pString; // Pointer to a string. const void *pBlob; // Pointer to a blob. ULONG cb; // Size of something. m_pTables->GetColumnInfo(ixTbl, ixCol, 0, 0, &ulType, 0); if (IsHeapType(ulType)) { for (ULONG rid=1; rid<=cRows; ++rid) { m_pTables->GetColumn(ixTbl, ixCol, rid, &ulVal); // Fixed type. switch (ulType) { case iSTRING: if (ulVal) { m_pTables->GetString(ulVal, &pString); cb = (ULONG) strlen(pString); rslt += cb + 1; } break; case iGUID: if (ulVal) rslt += 16; break; case iBLOB: if (ulVal) { m_pTables->GetBlob(ulVal, &cb, &pBlob); rslt += cb + 1; if (cb > 128) rslt += 1; if (cb > 16535) rslt += 1; } break; default: break; } } } return rslt; } // ULONG MDInfo::DumpRawColStats() int MDInfo::DumpHex( const char *szPrefix, // String prefix for first line. const void *pvData, // The data to print. ULONG cbData, // Bytes of data to print. int bText, // If true, also dump text. ULONG nLine) // Bytes per line to print. { const BYTE *pbData = static_cast<const BYTE*>(pvData); ULONG i; // Loop control. ULONG nPrint; // Number to print in an iteration. ULONG nSpace; // Spacing calculations. ULONG nPrefix; // Size of the prefix. ULONG nLines=0; // Number of lines printed. 
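// DumpHex prints cbData bytes, nLine per row (nLine is rounded down to a
// multiple of 8), writing szPrefix on the first row only and padding later
// rows to the same width. When bText is true an ASCII column is appended
// between '>' and '<', with non-printable bytes rendered as spaces.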
const char *pPrefix; // For counting spaces in the prefix. // Round down to 8 characters. nLine = nLine & ~0x7; for (nPrefix=0, pPrefix=szPrefix; *pPrefix; ++pPrefix) { if (*pPrefix == '\t') nPrefix = (nPrefix + 8) & ~7; else ++nPrefix; } //nPrefix = strlen(szPrefix); do { // Write the line prefix. if (szPrefix) VWrite("%s:", szPrefix); else VWrite("%*s:", nPrefix, ""); szPrefix = 0; ++nLines; // Calculate spacing. nPrint = min(cbData, nLine); nSpace = nLine - nPrint; // dump in hex. for(i=0; i<nPrint; i++) { if ((i&7) == 0) Write(" "); VWrite("%02x ", pbData[i]); } if (bText) { // Space out to the text spot. if (nSpace) VWrite("%*s", nSpace*3+nSpace/8, ""); // Dump in text. Write(">"); for(i=0; i<nPrint; i++) VWrite("%c", (isprint(pbData[i])) ? pbData[i] : ' '); // Space out the text, and finish the line. VWrite("%*s<", nSpace, ""); } VWriteLine(""); // Next data to print. cbData -= nPrint; pbData += nPrint; } while (cbData > 0); return nLines; } // int MDInfo::DumpHex() void MDInfo::DumpRawHeaps() { HRESULT hr; // A result. ULONG ulSize; // Bytes in a heap. const BYTE *pData; // Pointer to a blob. ULONG cbData; // Size of a blob. ULONG oData; // Offset of current blob. char rcPrefix[30]; // To format line prefix. m_pTables->GetBlobHeapSize(&ulSize); VWriteLine(""); VWriteLine("Blob Heap: %d(%#x) bytes", ulSize,ulSize); oData = 0; do { m_pTables->GetBlob(oData, &cbData, (const void**)&pData); sprintf_s(rcPrefix, 30, "%5x,%-2x", oData, cbData); DumpHex(rcPrefix, pData, cbData); hr = m_pTables->GetNextBlob(oData, &oData); } while (hr == S_OK); m_pTables->GetStringHeapSize(&ulSize); VWriteLine(""); VWriteLine("String Heap: %d(%#x) bytes", ulSize,ulSize); oData = 0; const char *pString; do { m_pTables->GetString(oData, &pString); if (m_DumpFilter & dumpMoreHex) { sprintf_s(rcPrefix, 30, "%08x", oData); DumpHex(rcPrefix, pString, (ULONG)strlen(pString)+1); } else if (*pString != 0) VWrite("%08x: %s\n", oData, pString); hr = m_pTables->GetNextString(oData, &oData); } while (hr == S_OK); VWriteLine(""); DisplayUserStrings(); } // void MDInfo::DumpRawHeaps() void MDInfo::DumpRaw(int iDump, bool bunused) { ULONG cTables; // Tables in the database. ULONG cCols; // Columns in a table. ULONG cRows; // Rows in a table. ULONG cbRow; // Bytes in a row of a table. ULONG iKey; // Key column of a table. const char *pNameTable; // Name of a table. ULONG oCol; // Offset of a column. ULONG cbCol; // Size of a column. ULONG ulType; // Type of a column. const char *pNameColumn; // Name of a column. ULONG ulSize; // Heaps is easy -- there is a specific bit for that. bool bStats = (m_DumpFilter & dumpStats) != 0; // Rows are harder. Was there something else that limited data? BOOL bRows = (m_DumpFilter & (dumpSchema | dumpHeader)) == 0; BOOL bSchema = bRows || (m_DumpFilter & dumpSchema); // (m_DumpFilter & (dumpSchema | dumpHeader | dumpCSV | dumpRaw | dumpStats | dumpRawHeaps)) if (m_pTables2) { // Get the raw metadata header. const BYTE *pbData = NULL; const BYTE *pbStream = NULL; // One of the stream.s const BYTE *pbMd = NULL; // The metadata stream. ULONG cbData = 0; ULONG cbStream = 0; // One of the streams. ULONG cbMd = 0; // The metadata stream. const char *pName; HRESULT hr = S_OK; ULONG ix; m_pTables2->GetMetaDataStorage((const void**)&pbData, &cbData); // Per the ECMA spec, the section data looks like this: struct MDSTORAGESIGNATURE { ULONG lSignature; // "Magic" signature. USHORT iMajorVer; // Major file version. USHORT iMinorVer; // Minor file version. 
ULONG iExtraData; // Offset to next structure of information ULONG iVersionString; // Length of version string BYTE pVersion[0]; // Version string }; struct MDSTORAGEHEADER { BYTE fFlags; // STGHDR_xxx flags. BYTE pad; USHORT iStreams; // How many streams are there. }; const MDSTORAGESIGNATURE *pStorage = (const MDSTORAGESIGNATURE *) pbData; const MDSTORAGEHEADER *pSHeader = (const MDSTORAGEHEADER *)(pbData + sizeof(MDSTORAGESIGNATURE) + pStorage->iVersionString); VWriteLine("Metadata section: 0x%08x, version: %d.%d, extra: %d, version len: %d, version: %s", pStorage->lSignature, pStorage->iMajorVer, pStorage->iMinorVer, pStorage->iExtraData, pStorage->iVersionString, pStorage->pVersion); VWriteLine(" flags: 0x%02x, streams: %d", pSHeader->fFlags, pSHeader->iStreams); if (m_DumpFilter & dumpMoreHex) { const BYTE *pbEnd = pbData; ULONG cb = sizeof(MDSTORAGESIGNATURE) + pStorage->iVersionString + sizeof(MDSTORAGEHEADER); hr = m_pTables2->GetMetaDataStreamInfo(0, &pName, (const void**)&pbEnd, &cbStream); if (hr == S_OK) cb = (ULONG)(pbEnd - pbData); DumpHex(" ", pbData, cb); } for (ix=0; hr == S_OK; ++ix) { hr = m_pTables2->GetMetaDataStreamInfo(ix, &pName, (const void**)&pbStream, &cbStream); if (hr != S_OK) break; if (strcmp(pName, "#~") == 0 || strcmp(pName, "#-") == 0) { pbMd = pbStream; cbMd = cbStream; } VWriteLine("Stream %d: name: %s, size %d", ix, pName, cbStream); // hex for individual stream headers in metadata section dump. hex for // the streams themselves distributed throughout the dump. } if (pbMd) { // Per ECMA, the metadata header looks like this: struct MD { ULONG m_ulReserved; // Reserved, must be zero. BYTE m_major; // Version numbers. BYTE m_minor; BYTE m_heaps; // Bits for heap sizes. BYTE m_rid; // log-base-2 of largest rid. unsigned __int64 m_maskvalid; // Bit mask of present table counts. unsigned __int64 m_sorted; // Bit mask of sorted tables. }; }; const MD *pMd; pMd = (const MD *)pbMd; VWriteLine("Metadata header: %d.%d, heaps: 0x%02x, rid: 0x%02x, valid: 0x%016I64x, sorted: 0x%016I64x", pMd->m_major, pMd->m_minor, pMd->m_heaps, pMd->m_rid, (ULONGLONG)GET_UNALIGNED_VAL64(&(pMd->m_maskvalid)), (ULONGLONG)GET_UNALIGNED_VAL64(&(pMd->m_sorted))); if (m_DumpFilter & dumpMoreHex) { DumpHex(" ", pbMd, sizeof(MD)); } } VWriteLine(""); } m_pTables->GetNumTables(&cTables); m_pTables->GetStringHeapSize(&ulSize); VWrite("Strings: %d(%#x)", ulSize, ulSize); m_pTables->GetBlobHeapSize(&ulSize); VWrite(", Blobs: %d(%#x)", ulSize, ulSize); m_pTables->GetGuidHeapSize(&ulSize); VWrite(", Guids: %d(%#x)", ulSize, ulSize); m_pTables->GetUserStringHeapSize(&ulSize); VWriteLine(", User strings: %d(%#x)", ulSize, ulSize); for (ULONG ixTbl = 0; ixTbl < cTables; ++ixTbl) { m_pTables->GetTableInfo(ixTbl, &cbRow, &cRows, &cCols, &iKey, &pNameTable); if (bRows) // when dumping rows, print a break between row data and schema VWriteLine("================================================="); VWriteLine("%2d(%#x): %-20s cRecs:%5d(%#x), cbRec:%3d(%#x), cbTable:%6d(%#x)", ixTbl, ixTbl, pNameTable, cRows, cRows, cbRow, cbRow, cbRow * cRows, cbRow * cRows); if (!bSchema && !bRows) continue; // Dump column definitions for the table. 
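// For each table the loop below prints one line per column: the column
// index (with '*' marking the key column), name, offset, width, and logical
// type, plus optional per-column heap statistics; when row dumping is
// enabled the raw records follow, one line per RID.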
ULONG ixCol; for (ixCol=0; ixCol<cCols; ++ixCol) { m_pTables->GetColumnInfo(ixTbl, ixCol, &oCol, &cbCol, &ulType, &pNameColumn); VWrite(" col %2x:%c %-12s oCol:%2x, cbCol:%x, %-7s", ixCol, ((ixCol==iKey)?'*':' '), pNameColumn, oCol, cbCol, DumpRawNameOfType(ulType)); if (bStats) { ulSize = DumpRawColStats(ixTbl, ixCol, cRows); if (ulSize) VWrite("(%d)", ulSize); } VWriteLine(""); } if (!bRows) continue; // Dump the rows. for (ULONG rid = 1; rid <= cRows; ++rid) { if (rid == 1) VWriteLine("-------------------------------------------------"); VWrite(" %3x == ", rid); for (ixCol=0; ixCol < cCols; ++ixCol) { if (ixCol) VWrite(", "); VWrite("%d:", ixCol); DumpRawCol(ixTbl, ixCol, rid, bStats); } VWriteLine(""); } } } // void MDInfo::DumpRaw() void MDInfo::DumpRawCSV() { ULONG cTables; // Tables in the database. ULONG cCols; // Columns in a table. ULONG cRows; // Rows in a table. ULONG cbRow; // Bytes in a row of a table. const char *pNameTable; // Name of a table. ULONG ulSize; m_pTables->GetNumTables(&cTables); VWriteLine("Name,Size,cRecs,cbRec"); m_pTables->GetStringHeapSize(&ulSize); VWriteLine("Strings,%d", ulSize); m_pTables->GetBlobHeapSize(&ulSize); VWriteLine("Blobs,%d", ulSize); m_pTables->GetGuidHeapSize(&ulSize); VWriteLine("Guids,%d", ulSize); for (ULONG ixTbl = 0; ixTbl < cTables; ++ixTbl) { m_pTables->GetTableInfo(ixTbl, &cbRow, &cRows, &cCols, NULL, &pNameTable); VWriteLine("%s,%d,%d,%d", pNameTable, cbRow*cRows, cRows, cbRow); } } // void MDInfo::DumpRawCSV()
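// DumpRawCSV above emits a "Name,Size,cRecs,cbRec" header followed by the
// string/blob/GUID heap totals and one row per metadata table, which makes
// the size breakdown easy to load into a spreadsheet.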
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include <stdio.h> #include <windows.h> #include <objbase.h> #include <crtdbg.h> #include <assert.h> #include <corpriv.h> #include <cor.h> #include "assert.h" #include "corerror.h" #include <winwrap.h> #include <prettyprintsig.h> #include <cahlpr.h> #include <limits.h> #include "mdinfo.h" #define ENUM_BUFFER_SIZE 10 #define TAB_SIZE 8 #define ISFLAG(p,x) if (Is##p##x(flags)) strcat_s(sFlags,STRING_BUFFER_LEN, "["#x "] "); extern HRESULT _FillVariant( BYTE bCPlusTypeFlag, void const *pValue, ULONG cbValue, VARIANT *pvar); // Validator declarations. extern DWORD g_ValModuleType; // Tables for mapping element type to text const char *g_szMapElementType[] = { "End", // 0x0 "Void", // 0x1 "Boolean", "Char", "I1", "UI1", "I2", // 0x6 "UI2", "I4", "UI4", "I8", "UI8", "R4", "R8", "String", "Ptr", // 0xf "ByRef", // 0x10 "ValueClass", "Class", "Var", "MDArray", // 0x14 "GenericInst", "TypedByRef", "VALUEARRAY", "I", "U", "R", // 0x1a "FNPTR", "Object", "SZArray", "MVar", "CMOD_REQD", "CMOD_OPT", "INTERNAL", }; const char *g_szMapUndecorateType[] = { "", // 0x0 "void", "boolean", "Char", "byte", "unsigned byte", "short", "unsigned short", "int", "unsigned int", "long", "unsigned long", "float", "double", "String", "*", // 0xf "ByRef", "", "", "", "", "", "", "", "", "", "", "Function Pointer", "Object", "", "", "CMOD_REQD", "CMOD_OPT", "INTERNAL", }; // Provide enough entries for IMAGE_CEE_CS_CALLCONV_MASK (defined in CorHdr.h) const char *g_strCalling[] = { "[DEFAULT]", "[C]", "[STDCALL]", "[THISCALL]", "[FASTCALL]", "[VARARG]", "[FIELD]", "[LOCALSIG]", "[PROPERTY]", "[UNMANAGED]", "[GENERICINST]", "[NATIVEVARARG]", "[INVALID]", "[INVALID]", "[INVALID]", "[INVALID]" }; const char *g_szNativeType[] = { "NATIVE_TYPE_END(DEPRECATED!)", // = 0x0, //DEPRECATED "NATIVE_TYPE_VOID(DEPRECATED!)", // = 0x1, //DEPRECATED "NATIVE_TYPE_BOOLEAN", // = 0x2, // (4 byte boolean value: TRUE = non-zero, FALSE = 0) "NATIVE_TYPE_I1", // = 0x3, "NATIVE_TYPE_U1", // = 0x4, "NATIVE_TYPE_I2", // = 0x5, "NATIVE_TYPE_U2", // = 0x6, "NATIVE_TYPE_I4", // = 0x7, "NATIVE_TYPE_U4", // = 0x8, "NATIVE_TYPE_I8", // = 0x9, "NATIVE_TYPE_U8", // = 0xa, "NATIVE_TYPE_R4", // = 0xb, "NATIVE_TYPE_R8", // = 0xc, "NATIVE_TYPE_SYSCHAR(DEPRECATED!)", // = 0xd, //DEPRECATED "NATIVE_TYPE_VARIANT(DEPRECATED!)", // = 0xe, //DEPRECATED "NATIVE_TYPE_CURRENCY", // = 0xf, "NATIVE_TYPE_PTR(DEPRECATED!)", // = 0x10, //DEPRECATED "NATIVE_TYPE_DECIMAL(DEPRECATED!)", // = 0x11, //DEPRECATED "NATIVE_TYPE_DATE(DEPRECATED!)", // = 0x12, //DEPRECATED "NATIVE_TYPE_BSTR", // = 0x13, "NATIVE_TYPE_LPSTR", // = 0x14, "NATIVE_TYPE_LPWSTR", // = 0x15, "NATIVE_TYPE_LPTSTR", // = 0x16, "NATIVE_TYPE_FIXEDSYSSTRING", // = 0x17, "NATIVE_TYPE_OBJECTREF(DEPRECATED!)", // = 0x18, //DEPRECATED "NATIVE_TYPE_IUNKNOWN", // = 0x19, "NATIVE_TYPE_IDISPATCH", // = 0x1a, "NATIVE_TYPE_STRUCT", // = 0x1b, "NATIVE_TYPE_INTF", // = 0x1c, "NATIVE_TYPE_SAFEARRAY", // = 0x1d, "NATIVE_TYPE_FIXEDARRAY", // = 0x1e, "NATIVE_TYPE_INT", // = 0x1f, "NATIVE_TYPE_UINT", // = 0x20, "NATIVE_TYPE_NESTEDSTRUCT(DEPRECATED!)", // = 0x21, //DEPRECATED (use "NATIVE_TYPE_STRUCT) "NATIVE_TYPE_BYVALSTR", // = 0x22, "NATIVE_TYPE_ANSIBSTR", // = 0x23, "NATIVE_TYPE_TBSTR", // = 0x24, // select BSTR or ANSIBSTR depending on platform "NATIVE_TYPE_VARIANTBOOL", // = 0x25, // (2-byte boolean value: TRUE = -1, FALSE = 0) "NATIVE_TYPE_FUNC", // = 0x26, "NATIVE_TYPE_LPVOID", // = 0x27, 
// blind pointer (no deep marshaling) "NATIVE_TYPE_ASANY", // = 0x28, "<UNDEFINED NATIVE TYPE 0x29>", "NATIVE_TYPE_ARRAY", // = 0x2a, "NATIVE_TYPE_LPSTRUCT", // = 0x2b, "NATIVE_TYPE_CUSTOMMARSHALER", // = 0x2c, // Custom marshaler. "NATIVE_TYPE_ERROR", // = 0x2d, // VT_HRESULT when exporting to a typelib. }; size_t g_cbCoffNames = 0; mdMethodDef g_tkEntryPoint = 0; // integration with ILDASM // helper to init signature buffer void MDInfo::InitSigBuffer() { strcpy_s((LPSTR)m_sigBuf.Ptr(), 1, ""); } // void MDInfo::InitSigBuffer() // helper to append a string into the signature buffer. If size of signature buffer is not big enough, // we will grow it. HRESULT MDInfo::AddToSigBuffer(_In_z_ const char *string) { HRESULT hr; size_t LL = strlen((LPSTR)m_sigBuf.Ptr()) + strlen(string) + 1; IfFailRet( m_sigBuf.ReSizeNoThrow(LL) ); strcat_s((LPSTR)m_sigBuf.Ptr(), LL, string); return NOERROR; } // HRESULT MDInfo::AddToSigBuffer() MDInfo::MDInfo(IMetaDataImport2 *pImport, IMetaDataAssemblyImport *pAssemblyImport, LPCWSTR szScope, strPassBackFn inPBFn, ULONG DumpFilter) { // This constructor is specific to ILDASM/MetaInfo integration _ASSERTE(pImport != NULL); _ASSERTE(ARRAY_SIZE(g_szMapElementType) == ARRAY_SIZE(g_szMapUndecorateType)); _ASSERTE(ARRAY_SIZE(g_szMapElementType) == ELEMENT_TYPE_MAX); Init(inPBFn, (DUMP_FILTER)DumpFilter); m_pImport = pImport; m_pImport->AddRef(); if ((m_pAssemblyImport = pAssemblyImport)) m_pAssemblyImport->AddRef(); else { HRESULT hr = m_pImport->QueryInterface(IID_IMetaDataAssemblyImport, (void**) &m_pAssemblyImport); if (FAILED(hr)) Error("QueryInterface failed for IID_IMetaDataAssemblyImport.", hr); } } // MDInfo::MDInfo() MDInfo::MDInfo(IMetaDataDispenserEx *pDispenser, LPCWSTR szScope, strPassBackFn inPBFn, ULONG DumpFilter) { HRESULT hr = S_OK; VARIANT value; _ASSERTE(pDispenser != NULL && inPBFn != NULL); _ASSERTE(ARRAY_SIZE(g_szMapElementType) == ARRAY_SIZE(g_szMapUndecorateType)); _ASSERTE(ARRAY_SIZE(g_szMapElementType) == ELEMENT_TYPE_MAX); Init(inPBFn, (DUMP_FILTER)DumpFilter); // Attempt to open scope on given file V_VT(&value) = VT_UI4; V_UI4(&value) = MDImportOptionAll; if (FAILED(hr = pDispenser->SetOption(MetaDataImportOption, &value))) Error("SetOption failed.", hr); hr = pDispenser->OpenScope(szScope, ofNoTransform, IID_IMetaDataImport2, (IUnknown**)&m_pImport); if (hr == CLDB_E_BADUPDATEMODE) { V_VT(&value) = VT_UI4; V_UI4(&value) = MDUpdateIncremental; if (FAILED(hr = pDispenser->SetOption(MetaDataSetUpdate, &value))) Error("SetOption failed.", hr); hr = pDispenser->OpenScope(szScope, ofNoTransform, IID_IMetaDataImport2, (IUnknown**)&m_pImport); } if (FAILED(hr)) Error("OpenScope failed", hr); // Query for the IMetaDataAssemblyImport interface. hr = m_pImport->QueryInterface(IID_IMetaDataAssemblyImport, (void**) &m_pAssemblyImport); if (FAILED(hr)) Error("QueryInterface failed for IID_IMetaDataAssemblyImport.", hr); } // MDInfo::MDInfo() MDInfo::MDInfo(IMetaDataDispenserEx *pDispenser, PBYTE pbMetaData, DWORD dwSize, strPassBackFn inPBFn, ULONG DumpFilter) { _ASSERTE(pDispenser != NULL && inPBFn != NULL); _ASSERTE(ARRAY_SIZE(g_szMapElementType) == ARRAY_SIZE(g_szMapUndecorateType)); _ASSERTE(ARRAY_SIZE(g_szMapElementType) == ELEMENT_TYPE_MAX); Init(inPBFn, (DUMP_FILTER)DumpFilter); // Attempt to open scope on manifest. It's valid for this to fail, because // the blob we open may just be the assembly resources (the space is // overloaded until we remove LM -a assemblies, at which point this // constructor should probably be removed too). 
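// As in the file-based constructor above, MetaDataImportOption is set to
// MDImportOptionAll before opening the scope so that every record is
// surfaced to the dumper; the difference is that the scope is opened over an
// in-memory blob via OpenScopeOnMemory, and a failure to open is tolerated
// for the reason described above.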
HRESULT hr; VARIANT value; V_VT(&value) = VT_UI4; V_UI4(&value) = MDImportOptionAll; if (FAILED(hr = pDispenser->SetOption(MetaDataImportOption, &value))) Error("SetOption failed.", hr); if (SUCCEEDED(hr = pDispenser->OpenScopeOnMemory(pbMetaData, dwSize, ofNoTransform, IID_IMetaDataImport2, (IUnknown**)&m_pImport))) { // Query for the IMetaDataAssemblyImport interface. hr = m_pImport->QueryInterface(IID_IMetaDataAssemblyImport, (void**) &m_pAssemblyImport); if (FAILED(hr)) Error("QueryInterface failed for IID_IMetaDataAssemblyImport.", hr); } } // MDInfo::MDInfo() void MDInfo::Init( strPassBackFn inPBFn, // Callback to write text. DUMP_FILTER DumpFilter) // Flags to control the dump. { m_pbFn = inPBFn; m_DumpFilter = DumpFilter; m_pTables = NULL; m_pTables2 = NULL; m_pImport = NULL; m_pAssemblyImport = NULL; } // void MDInfo::Init() // Destructor MDInfo::~MDInfo() { if (m_pImport) m_pImport->Release(); if (m_pAssemblyImport) m_pAssemblyImport->Release(); if (m_pTables) m_pTables->Release(); if (m_pTables2) m_pTables2->Release(); } // MDInfo::~MDInfo() //===================================================================================================================== // DisplayMD() function // // Displays the meta data content of a file void MDInfo::DisplayMD() { if ((m_DumpFilter & dumpAssem) && m_pAssemblyImport) DisplayAssemblyInfo(); WriteLine("==========================================================="); // Metadata itself: Raw or normal view if (m_DumpFilter & (dumpSchema | dumpHeader | dumpCSV | dumpRaw | dumpStats | dumpRawHeaps)) DisplayRaw(); else { DisplayVersionInfo(); DisplayScopeInfo(); WriteLine("==========================================================="); DisplayGlobalFunctions(); DisplayGlobalFields(); DisplayGlobalMemberRefs(); DisplayTypeDefs(); DisplayTypeRefs(); DisplayTypeSpecs(); DisplayMethodSpecs(); DisplayModuleRefs(); DisplaySignatures(); DisplayAssembly(); DisplayUserStrings(); // WriteLine("============================================================"); // WriteLine("Unresolved MemberRefs"); // DisplayMemberRefs(0x00000001, "\t"); VWrite("\n\nCoff symbol name overhead: %d\n", g_cbCoffNames); } WriteLine("==========================================================="); if (m_DumpFilter & dumpUnsat) DisplayUnsatInfo(); WriteLine("==========================================================="); } // MDVEHandlerClass() int MDInfo::WriteLine(_In_z_ const char *str) { ULONG32 count = (ULONG32) strlen(str); m_pbFn(str); m_pbFn("\n"); return count; } // int MDInfo::WriteLine() int MDInfo::Write(_In_z_ const char *str) { ULONG32 count = (ULONG32) strlen(str); m_pbFn(str); return count; } // int MDInfo::Write() int MDInfo::VWriteLine(_In_z_ const char *str, ...) { va_list marker; int count; va_start(marker, str); count = VWriteMarker(str, marker); m_pbFn("\n"); va_end(marker); return count; } // int MDInfo::VWriteLine() int MDInfo::VWrite(_In_z_ const char *str, ...) { va_list marker; int count; va_start(marker, str); count = VWriteMarker(str, marker); va_end(marker); return count; } // int MDInfo::VWrite() int MDInfo::VWriteMarker(_In_z_ const char *str, va_list marker) { HRESULT hr; int count = -1; // Used to allocate 1K, then if not enough, 2K, then 4K. 
// Faster to allocate 32K right away and be done with it, // we're not running on Commodore 64 if (FAILED(hr = m_output.ReSizeNoThrow(STRING_BUFFER_LEN * 8))) Error("ReSize failed.", hr); else { count = vsprintf_s((char *)m_output.Ptr(), STRING_BUFFER_LEN * 8, str, marker); m_pbFn((char *)m_output.Ptr()); } return count; } // int MDInfo::VWriteToBuffer() // Error() function -- prints an error and returns void MDInfo::Error(const char* szError, HRESULT hr) { printf("\n%s\n",szError); if (hr != S_OK) { printf("Failed return code: 0x%08x\n", hr); IErrorInfo *pIErr = NULL; // Error interface. BSTR bstrDesc = NULL; // Description text. #ifdef FEATURE_COMINTEROP // Try to get an error info object and display the message. if (GetErrorInfo(0, &pIErr) == S_OK && pIErr->GetDescription(&bstrDesc) == S_OK) { printf("%ls ", bstrDesc); SysFreeString(bstrDesc); } #endif // Free the error interface. if (pIErr) pIErr->Release(); } exit(hr); } // void MDInfo::Error() // Print out the optional version info included in the MetaData. void MDInfo::DisplayVersionInfo() { if (!(m_DumpFilter & MDInfo::dumpNoLogo)) { LPCUTF8 pVersionStr; HRESULT hr = S_OK; if (m_pTables == 0) { if (m_pImport) hr = m_pImport->QueryInterface(IID_IMetaDataTables, (void**)&m_pTables); else if (m_pAssemblyImport) hr = m_pAssemblyImport->QueryInterface(IID_IMetaDataTables, (void**)&m_pTables); else return; if (FAILED(hr)) Error("QueryInterface failed for IID_IMetaDataTables.", hr); } hr = m_pTables->GetString(1, &pVersionStr); if (FAILED(hr)) Error("GetString() failed.", hr); if (strstr(pVersionStr, "Version of runtime against which the binary is built : ") == pVersionStr) { WriteLine(const_cast<char *>(pVersionStr)); } } } // void MDInfo::DisplayVersionInfo() // Prints out information about the scope void MDInfo::DisplayScopeInfo() { HRESULT hr; mdModule mdm; GUID mvid; WCHAR scopeName[STRING_BUFFER_LEN]; WCHAR guidString[STRING_BUFFER_LEN]; hr = m_pImport->GetScopeProps( scopeName, STRING_BUFFER_LEN, 0, &mvid); if (FAILED(hr)) Error("GetScopeProps failed.", hr); VWriteLine("ScopeName : %ls",scopeName); if (!(m_DumpFilter & MDInfo::dumpNoLogo)) VWriteLine("MVID : %ls",GUIDAsString(mvid, guidString, STRING_BUFFER_LEN)); hr = m_pImport->GetModuleFromScope(&mdm); if (FAILED(hr)) Error("GetModuleFromScope failed.", hr); DisplayPermissions(mdm, ""); DisplayCustomAttributes(mdm, "\t"); } // void MDInfo::DisplayScopeInfo() void MDInfo::DisplayRaw() { int iDump; // Level of info to dump. 
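// DisplayRaw computes a verbosity level for DumpRaw (3 when dumpRaw is set,
// 2 for dumpSchema, otherwise 1); note that DumpRaw itself appears to derive
// its schema/row decisions from m_DumpFilter directly. dumpCSV and
// dumpRawHeaps are handled independently of that level.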
if (m_pTables == 0) m_pImport->QueryInterface(IID_IMetaDataTables, (void**)&m_pTables); if (m_pTables == 0) Error("Can't get table info."); if (m_pTables2 == 0) m_pImport->QueryInterface(IID_IMetaDataTables2, (void**)&m_pTables2); if (m_DumpFilter & dumpCSV) DumpRawCSV(); if (m_DumpFilter & (dumpSchema | dumpHeader | dumpRaw | dumpStats)) { if (m_DumpFilter & dumpRaw) iDump = 3; else if (m_DumpFilter & dumpSchema) iDump = 2; else iDump = 1; DumpRaw(iDump, (m_DumpFilter & dumpStats) != 0); } if (m_DumpFilter & dumpRawHeaps) DumpRawHeaps(); } // void MDInfo::DisplayRaw() // return the name of the type of token passed in const char *MDInfo::TokenTypeName(mdToken inToken) { switch(TypeFromToken(inToken)) { case mdtTypeDef: return "TypeDef"; case mdtInterfaceImpl: return "InterfaceImpl"; case mdtMethodDef: return "MethodDef"; case mdtFieldDef: return "FieldDef"; case mdtTypeRef: return "TypeRef"; case mdtMemberRef: return "MemberRef"; case mdtCustomAttribute:return "CustomAttribute"; case mdtParamDef: return "ParamDef"; case mdtProperty: return "Property"; case mdtEvent: return "Event"; case mdtTypeSpec: return "TypeSpec"; default: return "[UnknownTokenType]"; } } // char *MDInfo::TokenTypeName() // Prints out name of the given memberref // LPCWSTR MDInfo::MemberRefName(mdMemberRef inMemRef, _Out_writes_(bufLen) LPWSTR buffer, ULONG bufLen) { HRESULT hr; hr = m_pImport->GetMemberRefProps( inMemRef, NULL, buffer, bufLen, NULL, NULL, NULL); if (FAILED(hr)) Error("GetMemberRefProps failed.", hr); return buffer; } // LPCWSTR MDInfo::MemberRefName() // Prints out information about the given memberref // void MDInfo::DisplayMemberRefInfo(mdMemberRef inMemRef, const char *preFix) { HRESULT hr; WCHAR memRefName[STRING_BUFFER_LEN]; ULONG nameLen; mdToken token; PCCOR_SIGNATURE pbSigBlob; ULONG ulSigBlob; char newPreFix[STRING_BUFFER_LEN]; hr = m_pImport->GetMemberRefProps( inMemRef, &token, memRefName, STRING_BUFFER_LEN, &nameLen, &pbSigBlob, &ulSigBlob); if (FAILED(hr)) Error("GetMemberRefProps failed.", hr); VWriteLine("%s\t\tMember: (%8.8x) %ls: ", preFix, inMemRef, memRefName); if (ulSigBlob) DisplaySignature(pbSigBlob, ulSigBlob, preFix); else VWriteLine("%s\t\tERROR: no valid signature ", preFix); sprintf_s (newPreFix, STRING_BUFFER_LEN, "\t\t%s", preFix); DisplayCustomAttributes(inMemRef, newPreFix); } // void MDInfo::DisplayMemberRefInfo() // Prints out information about all memberrefs of the given typeref // void MDInfo::DisplayMemberRefs(mdToken tkParent, const char *preFix) { HCORENUM memRefEnum = NULL; HRESULT hr; mdMemberRef memRefs[ENUM_BUFFER_SIZE]; ULONG count, totalCount = 1; while (SUCCEEDED(hr = m_pImport->EnumMemberRefs( &memRefEnum, tkParent, memRefs, ARRAY_SIZE(memRefs), &count)) && count > 0) { for (ULONG i = 0; i < count; i++, totalCount++) { VWriteLine("%s\tMemberRef #%d (%08x)", preFix, totalCount, memRefs[i]); VWriteLine("%s\t-------------------------------------------------------", preFix); DisplayMemberRefInfo(memRefs[i], preFix); } } m_pImport->CloseEnum( memRefEnum); } // void MDInfo::DisplayMemberRefs() // Prints out information about all resources in the com object // // Iterates through each typeref and prints out the information of each // void MDInfo::DisplayTypeRefs() { HCORENUM typeRefEnum = NULL; mdTypeRef typeRefs[ENUM_BUFFER_SIZE]; ULONG count, totalCount=1; HRESULT hr; while (SUCCEEDED(hr = m_pImport->EnumTypeRefs( &typeRefEnum, typeRefs, ARRAY_SIZE(typeRefs), &count)) && count > 0) { for (ULONG i = 0; i < count; i++, totalCount++) { VWriteLine("TypeRef #%d 
(%08x)", totalCount, typeRefs[i]); WriteLine("-------------------------------------------------------"); DisplayTypeRefInfo(typeRefs[i]); DisplayMemberRefs(typeRefs[i], ""); WriteLine(""); } } m_pImport->CloseEnum( typeRefEnum); } // void MDInfo::DisplayTypeRefs() void MDInfo::DisplayTypeSpecs() { HCORENUM typespecEnum = NULL; mdTypeSpec typespecs[ENUM_BUFFER_SIZE]; ULONG count, totalCount=1; HRESULT hr; while (SUCCEEDED(hr = m_pImport->EnumTypeSpecs( &typespecEnum, typespecs, ARRAY_SIZE(typespecs), &count)) && count > 0) { for (ULONG i = 0; i < count; i++, totalCount++) { VWriteLine("TypeSpec #%d (%08x)", totalCount, typespecs[i]); WriteLine("-------------------------------------------------------"); DisplayTypeSpecInfo(typespecs[i], ""); DisplayMemberRefs(typespecs[i], ""); WriteLine(""); } } m_pImport->CloseEnum( typespecEnum); } // void MDInfo::DisplayTypeSpecs() void MDInfo::DisplayMethodSpecs() { HCORENUM MethodSpecEnum = NULL; mdMethodSpec MethodSpecs[ENUM_BUFFER_SIZE]; ULONG count, totalCount=1; ///// HRESULT hr; ///// HACK until I implement EnumMethodSpecs! ///// while (SUCCEEDED(hr = m_pImport->EnumMethodSpecs( &MethodSpecEnum, ///// MethodSpecs, ARRAY_SIZE(MethodSpecs), &count)) && ///// count > 0) for (ULONG rid=1; m_pImport->IsValidToken(TokenFromRid(rid, mdtMethodSpec)); ++rid) { // More hackery count = 1; MethodSpecs[0] = TokenFromRid(rid, mdtMethodSpec); // More hackery for (ULONG i = 0; i < count; i++, totalCount++) { VWriteLine("MethodSpec #%d (%08x)", totalCount, MethodSpecs[i]); DisplayMethodSpecInfo(MethodSpecs[i], ""); WriteLine(""); } } m_pImport->CloseEnum( MethodSpecEnum); } // void MDInfo::DisplayMethodSpecs() // Called to display the information about all typedefs in the object. // void MDInfo::DisplayTypeDefs() { HCORENUM typeDefEnum = NULL; mdTypeDef typeDefs[ENUM_BUFFER_SIZE]; ULONG count, totalCount = 1; HRESULT hr; while (SUCCEEDED(hr = m_pImport->EnumTypeDefs( &typeDefEnum, typeDefs, ARRAY_SIZE(typeDefs), &count)) && count > 0) { for (ULONG i = 0; i < count; i++, totalCount++) { VWriteLine("TypeDef #%d (%08x)", totalCount, typeDefs[i]); WriteLine("-------------------------------------------------------"); DisplayTypeDefInfo(typeDefs[i]); WriteLine(""); } } m_pImport->CloseEnum( typeDefEnum); } // void MDInfo::DisplayTypeDefs() // Called to display the information about all modulerefs in the object. 
// void MDInfo::DisplayModuleRefs() { HCORENUM moduleRefEnum = NULL; mdModuleRef moduleRefs[ENUM_BUFFER_SIZE]; ULONG count, totalCount = 1; HRESULT hr; while (SUCCEEDED(hr = m_pImport->EnumModuleRefs( &moduleRefEnum, moduleRefs, ARRAY_SIZE(moduleRefs), &count)) && count > 0) { for (ULONG i = 0; i < count; i++, totalCount++) { VWriteLine("ModuleRef #%d (%08x)", totalCount, moduleRefs[i]); WriteLine("-------------------------------------------------------"); DisplayModuleRefInfo(moduleRefs[i]); DisplayMemberRefs(moduleRefs[i], ""); WriteLine(""); } } m_pImport->CloseEnum( moduleRefEnum); } // void MDInfo::DisplayModuleRefs() // Prints out information about the given moduleref // void MDInfo::DisplayModuleRefInfo(mdModuleRef inModuleRef) { HRESULT hr; WCHAR moduleRefName[STRING_BUFFER_LEN]; ULONG nameLen; hr = m_pImport->GetModuleRefProps( inModuleRef, moduleRefName, STRING_BUFFER_LEN, &nameLen); if (FAILED(hr)) Error("GetModuleRefProps failed.", hr); VWriteLine("\t\tModuleRef: (%8.8x) %ls: ", inModuleRef, moduleRefName); DisplayCustomAttributes(inModuleRef, "\t\t"); } // void MDInfo::DisplayModuleRefInfo() // Called to display the information about all signatures in the object. // void MDInfo::DisplaySignatures() { HCORENUM signatureEnum = NULL; mdSignature signatures[ENUM_BUFFER_SIZE]; ULONG count, totalCount = 1; HRESULT hr; while (SUCCEEDED(hr = m_pImport->EnumSignatures( &signatureEnum, signatures, ARRAY_SIZE(signatures), &count)) && count > 0) { for (ULONG i = 0; i < count; i++, totalCount++) { VWriteLine("Signature #%d (%#08x)", totalCount, signatures[i]); WriteLine("-------------------------------------------------------"); DisplaySignatureInfo(signatures[i]); WriteLine(""); } } m_pImport->CloseEnum( signatureEnum); } // void MDInfo::DisplaySignatures() // Prints out information about the given signature // void MDInfo::DisplaySignatureInfo(mdSignature inSignature) { HRESULT hr; PCCOR_SIGNATURE pbSigBlob; ULONG ulSigBlob; hr = m_pImport->GetSigFromToken( inSignature, &pbSigBlob, &ulSigBlob ); if (FAILED(hr)) Error("GetSigFromToken failed.", hr); if(ulSigBlob) DisplaySignature(pbSigBlob, ulSigBlob, ""); else VWriteLine("\t\tERROR: no valid signature "); } // void MDInfo::DisplaySignatureInfo() // returns the passed-in buffer which is filled with the name of the given // member in wide characters // LPCWSTR MDInfo::MemberName(mdToken inToken, _Out_writes_(bufLen) LPWSTR buffer, ULONG bufLen) { HRESULT hr; hr = m_pImport->GetMemberProps( inToken, NULL, buffer, bufLen, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); if (FAILED(hr)) Error("GetMemberProps failed.", hr); return (buffer); } // LPCWSTR MDInfo::MemberName() // displays information for the given method // void MDInfo::DisplayMethodInfo(mdMethodDef inMethod, DWORD *pflags) { HRESULT hr; mdTypeDef memTypeDef; WCHAR memberName[STRING_BUFFER_LEN]; ULONG nameLen; DWORD flags; PCCOR_SIGNATURE pbSigBlob; ULONG ulSigBlob; ULONG ulCodeRVA; ULONG ulImplFlags; hr = m_pImport->GetMethodProps( inMethod, &memTypeDef, memberName, STRING_BUFFER_LEN, &nameLen, &flags, &pbSigBlob, &ulSigBlob, &ulCodeRVA, &ulImplFlags); if (FAILED(hr)) Error("GetMethodProps failed.", hr); if (pflags) *pflags = flags; VWriteLine("\t\tMethodName: %ls (%8.8X)", memberName, inMethod); char sFlags[STRING_BUFFER_LEN]; sFlags[0] = 0; ISFLAG(Md, Public); ISFLAG(Md, Private); ISFLAG(Md, Family); ISFLAG(Md, Assem); ISFLAG(Md, FamANDAssem); ISFLAG(Md, FamORAssem); ISFLAG(Md, PrivateScope); ISFLAG(Md, Static); ISFLAG(Md, Final); ISFLAG(Md, Virtual); ISFLAG(Md, 
HideBySig); ISFLAG(Md, ReuseSlot); ISFLAG(Md, NewSlot); ISFLAG(Md, Abstract); ISFLAG(Md, SpecialName); ISFLAG(Md, RTSpecialName); ISFLAG(Md, PinvokeImpl); ISFLAG(Md, UnmanagedExport); if (!*sFlags) strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]"); bool result = (((flags) & mdRTSpecialName) && !wcscmp((memberName), W(".ctor"))); if (result) strcat_s(sFlags, STRING_BUFFER_LEN, "[.ctor] "); result = (((flags) & mdRTSpecialName) && !wcscmp((memberName), W(".cctor"))); if (result) strcat_s(sFlags,STRING_BUFFER_LEN, "[.cctor] "); // "Reserved" flags ISFLAG(Md, HasSecurity); ISFLAG(Md, RequireSecObject); VWriteLine("\t\tFlags : %s (%08x)", sFlags, flags); VWriteLine("\t\tRVA : 0x%08x", ulCodeRVA); flags = ulImplFlags; sFlags[0] = 0; ISFLAG(Mi, Native); ISFLAG(Mi, IL); ISFLAG(Mi, OPTIL); ISFLAG(Mi, Runtime); ISFLAG(Mi, Unmanaged); ISFLAG(Mi, Managed); ISFLAG(Mi, ForwardRef); ISFLAG(Mi, PreserveSig); ISFLAG(Mi, InternalCall); ISFLAG(Mi, Synchronized); ISFLAG(Mi, NoInlining); if (!*sFlags) strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]"); VWriteLine("\t\tImplFlags : %s (%08x)", sFlags, flags); if (ulSigBlob) DisplaySignature(pbSigBlob, ulSigBlob, ""); else VWriteLine("\t\tERROR: no valid signature "); DisplayGenericParams(inMethod, "\t\t"); } // void MDInfo::DisplayMethodInfo() // displays the member information for the given field // void MDInfo::DisplayFieldInfo(mdFieldDef inField, DWORD *pdwFlags) { HRESULT hr; mdTypeDef memTypeDef; WCHAR memberName[STRING_BUFFER_LEN]; ULONG nameLen; DWORD flags; PCCOR_SIGNATURE pbSigBlob; ULONG ulSigBlob; DWORD dwCPlusTypeFlag; void const *pValue; ULONG cbValue; #ifdef FEATURE_COMINTEROP VARIANT defaultValue; ::VariantInit(&defaultValue); #endif hr = m_pImport->GetFieldProps( inField, &memTypeDef, memberName, STRING_BUFFER_LEN, &nameLen, &flags, &pbSigBlob, &ulSigBlob, &dwCPlusTypeFlag, &pValue, &cbValue); if (FAILED(hr)) Error("GetFieldProps failed.", hr); if (pdwFlags) *pdwFlags = flags; #ifdef FEATURE_COMINTEROP _FillVariant((BYTE)dwCPlusTypeFlag, pValue, cbValue, &defaultValue); #endif char sFlags[STRING_BUFFER_LEN]; sFlags[0] = 0; ISFLAG(Fd, Public); ISFLAG(Fd, Private); ISFLAG(Fd, Family); ISFLAG(Fd, Assembly); ISFLAG(Fd, FamANDAssem); ISFLAG(Fd, FamORAssem); ISFLAG(Fd, PrivateScope); ISFLAG(Fd, Static); ISFLAG(Fd, InitOnly); ISFLAG(Fd, Literal); ISFLAG(Fd, NotSerialized); ISFLAG(Fd, SpecialName); ISFLAG(Fd, RTSpecialName); ISFLAG(Fd, PinvokeImpl); // "Reserved" flags ISFLAG(Fd, HasDefault); if (!*sFlags) strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]"); VWriteLine("\t\tField Name: %ls (%8.8X)", memberName, inField); VWriteLine("\t\tFlags : %s (%08x)", sFlags, flags); #ifdef FEATURE_COMINTEROP if (IsFdHasDefault(flags)) VWriteLine("\tDefltValue: (%s) %ls", g_szMapElementType[dwCPlusTypeFlag], VariantAsString(&defaultValue)); #endif if (!ulSigBlob) // Signature size should be non-zero for fields VWriteLine("\t\tERROR: no valid signature "); else DisplaySignature(pbSigBlob, ulSigBlob, ""); #ifdef FEATURE_COMINTEROP ::VariantClear(&defaultValue); #endif } // void MDInfo::DisplayFieldInfo() // displays the RVA for the given global field. void MDInfo::DisplayFieldRVA(mdFieldDef inFieldDef) { HRESULT hr; ULONG ulRVA; hr = m_pImport->GetRVA(inFieldDef, &ulRVA, 0); if (FAILED(hr) && hr != CLDB_E_RECORD_NOTFOUND) Error("GetRVA failed.", hr); VWriteLine("\t\tRVA : 0x%08x", ulRVA); } // void MDInfo::DisplayFieldRVA() // displays information about every global function. 
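// Global functions (like the global fields below) hang off the module rather than a
// user-defined type, so the enumeration simply passes mdTokenNil as the owning TypeDef.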
void MDInfo::DisplayGlobalFunctions() { WriteLine("Global functions"); WriteLine("-------------------------------------------------------"); DisplayMethods(mdTokenNil); WriteLine(""); } // void MDInfo::DisplayGlobalFunctions() // displays information about every global field. void MDInfo::DisplayGlobalFields() { WriteLine("Global fields"); WriteLine("-------------------------------------------------------"); DisplayFields(mdTokenNil, NULL, 0); WriteLine(""); } // void MDInfo::DisplayGlobalFields() // displays information about every global memberref. void MDInfo::DisplayGlobalMemberRefs() { WriteLine("Global MemberRefs"); WriteLine("-------------------------------------------------------"); DisplayMemberRefs(mdTokenNil, ""); WriteLine(""); } // void MDInfo::DisplayGlobalMemberRefs() // displays information about every method in a given typedef // void MDInfo::DisplayMethods(mdTypeDef inTypeDef) { HCORENUM methodEnum = NULL; mdToken methods[ENUM_BUFFER_SIZE]; DWORD flags; ULONG count, totalCount = 1; HRESULT hr; while (SUCCEEDED(hr = m_pImport->EnumMethods( &methodEnum, inTypeDef, methods, ARRAY_SIZE(methods), &count)) && count > 0) { for (ULONG i = 0; i < count; i++, totalCount++) { VWriteLine("\tMethod #%d (%08x) %s", totalCount, methods[i], (methods[i] == g_tkEntryPoint) ? "[ENTRYPOINT]" : ""); WriteLine("\t-------------------------------------------------------"); DisplayMethodInfo(methods[i], &flags); DisplayParams(methods[i]); DisplayCustomAttributes(methods[i], "\t\t"); DisplayPermissions(methods[i], "\t"); DisplayMemberRefs(methods[i], "\t"); // P-invoke data if present. if (IsMdPinvokeImpl(flags)) DisplayPinvokeInfo(methods[i]); WriteLine(""); } } m_pImport->CloseEnum( methodEnum); } // void MDInfo::DisplayMethods() // displays information about every field in a given typedef // void MDInfo::DisplayFields(mdTypeDef inTypeDef, COR_FIELD_OFFSET *rFieldOffset, ULONG cFieldOffset) { HCORENUM fieldEnum = NULL; mdToken fields[ENUM_BUFFER_SIZE]; ULONG count, totalCount = 1; DWORD flags; HRESULT hr; while (SUCCEEDED(hr = m_pImport->EnumFields( &fieldEnum, inTypeDef, fields, ARRAY_SIZE(fields), &count)) && count > 0) { for (ULONG i = 0; i < count; i++, totalCount++) { VWriteLine("\tField #%d (%08x)",totalCount, fields[i]); WriteLine("\t-------------------------------------------------------"); DisplayFieldInfo(fields[i], &flags); DisplayCustomAttributes(fields[i], "\t\t"); DisplayPermissions(fields[i], "\t"); DisplayFieldMarshal(fields[i]); // RVA if its a global field. if (inTypeDef == mdTokenNil) DisplayFieldRVA(fields[i]); // P-invoke data if present. if (IsFdPinvokeImpl(flags)) DisplayPinvokeInfo(fields[i]); // Display offset if present. 
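            // (Offsets are supplied by the caller from GetClassLayout -- see
            //  DisplayTypeDefInfo -- so they only show up for types that carry layout info.)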
if (cFieldOffset) { bool found = false; for (ULONG iLayout = 0; iLayout < cFieldOffset; ++iLayout) { if (RidFromToken(rFieldOffset[iLayout].ridOfField) == RidFromToken(fields[i])) { found = true; VWriteLine("\t\tOffset : 0x%08x", rFieldOffset[iLayout].ulOffset); break; } } _ASSERTE(found); } WriteLine(""); } } m_pImport->CloseEnum( fieldEnum); } // void MDInfo::DisplayFields() // displays information about every methodImpl in a given typedef // void MDInfo::DisplayMethodImpls(mdTypeDef inTypeDef) { HCORENUM methodImplEnum = NULL; mdMethodDef rtkMethodBody[ENUM_BUFFER_SIZE]; mdMethodDef rtkMethodDecl[ENUM_BUFFER_SIZE]; ULONG count, totalCount=1; HRESULT hr; while (SUCCEEDED(hr = m_pImport->EnumMethodImpls( &methodImplEnum, inTypeDef, rtkMethodBody, rtkMethodDecl, ARRAY_SIZE(rtkMethodBody), &count)) && count > 0) { for (ULONG i = 0; i < count; i++, totalCount++) { VWriteLine("\n\tMethodImpl #%d (%08x)", totalCount, totalCount); WriteLine("\t-------------------------------------------------------"); VWriteLine("\t\tMethod Body Token : 0x%08x", rtkMethodBody[i]); VWriteLine("\t\tMethod Declaration Token : 0x%08x", rtkMethodDecl[i]); WriteLine(""); } } m_pImport->CloseEnum( methodImplEnum); } // void MDInfo::DisplayMethodImpls() // displays information about the given parameter // void MDInfo::DisplayParamInfo(mdParamDef inParamDef) { mdMethodDef md; ULONG num; WCHAR paramName[STRING_BUFFER_LEN]; ULONG nameLen; DWORD flags; VARIANT defValue; DWORD dwCPlusFlags; void const *pValue; ULONG cbValue; #ifdef FEATURE_COMINTEROP ::VariantInit(&defValue); #endif HRESULT hr = m_pImport->GetParamProps( inParamDef, &md, &num, paramName, ARRAY_SIZE(paramName), &nameLen, &flags, &dwCPlusFlags, &pValue, &cbValue); if (FAILED(hr)) Error("GetParamProps failed.", hr); _FillVariant((BYTE)dwCPlusFlags, pValue, cbValue, &defValue); char sFlags[STRING_BUFFER_LEN]; sFlags[0] = 0; ISFLAG(Pd, In); ISFLAG(Pd, Out); ISFLAG(Pd, Optional); // "Reserved" flags. 
ISFLAG(Pd, HasDefault); ISFLAG(Pd, HasFieldMarshal); if (!*sFlags) strcpy_s(sFlags,STRING_BUFFER_LEN, "[none]"); VWrite("\t\t\t(%ld) ParamToken : (%08x) Name : %ls flags: %s (%08x)", num, inParamDef, paramName, sFlags, flags); #ifdef FEATURE_COMINTEROP if (IsPdHasDefault(flags)) VWriteLine(" Default: (%s) %ls", g_szMapElementType[dwCPlusFlags], VariantAsString(&defValue)); else #endif VWriteLine(""); DisplayCustomAttributes(inParamDef, "\t\t\t"); #ifdef FEATURE_COMINTEROP ::VariantClear(&defValue); #endif } // void MDInfo::DisplayParamInfo() // displays all parameters for a given memberdef // void MDInfo::DisplayParams(mdMethodDef inMethodDef) { HCORENUM paramEnum = NULL; mdParamDef params[ENUM_BUFFER_SIZE]; ULONG count, paramCount; bool first = true; HRESULT hr; while (SUCCEEDED(hr = m_pImport->EnumParams( &paramEnum, inMethodDef, params, ARRAY_SIZE(params), &count)) && count > 0) { if (first) { m_pImport->CountEnum( paramEnum, &paramCount); VWriteLine("\t\t%d Parameters", paramCount); } for (ULONG i = 0; i < count; i++) { DisplayParamInfo(params[i]); DisplayFieldMarshal(params[i]); } first = false; } m_pImport->CloseEnum( paramEnum); } // void MDInfo::DisplayParams() void MDInfo::DisplayGenericParams(mdToken tk, const char *prefix) { HCORENUM paramEnum = NULL; mdParamDef params[ENUM_BUFFER_SIZE]; ULONG count, paramCount; bool first = true; HRESULT hr; while (SUCCEEDED(hr = m_pImport->EnumGenericParams( &paramEnum, tk, params, ARRAY_SIZE(params), &count)) && count > 0) { if (first) { m_pImport->CountEnum( paramEnum, &paramCount); VWriteLine("%s%d Generic Parameters", prefix, paramCount); } for (ULONG i = 0; i < count; i++) { DisplayGenericParamInfo(params[i], prefix); } first = false; } m_pImport->CloseEnum( paramEnum); } void MDInfo::DisplayGenericParamInfo(mdGenericParam tkParam, const char *prefix) { ULONG ulSeq; WCHAR paramName[STRING_BUFFER_LEN]; ULONG nameLen; DWORD flags; mdToken tkOwner; char newprefix[30]; HCORENUM constraintEnum = NULL; mdParamDef constraints[4]; ULONG count, constraintCount; mdToken constraint; mdToken owner; bool first = true; HRESULT hr = m_pImport->GetGenericParamProps(tkParam, &ulSeq, &flags, &tkOwner, NULL, paramName, ARRAY_SIZE(paramName), &nameLen); if (FAILED(hr)) Error("GetGenericParamProps failed.", hr); VWriteLine("%s\t(%ld) GenericParamToken : (%08x) Name : %ls flags: %08x Owner: %08x", prefix, ulSeq, tkParam, paramName, flags, tkOwner); // Any constraints for the GenericParam while (SUCCEEDED(hr = m_pImport->EnumGenericParamConstraints(&constraintEnum, tkParam, constraints, ARRAY_SIZE(constraints), &count)) && count > 0) { if (first) { m_pImport->CountEnum( constraintEnum, &constraintCount); VWriteLine("%s\t\t%d Constraint(s)", prefix, constraintCount); } VWrite("%s\t\t", prefix); for (ULONG i=0; i< count; ++i) { hr = m_pImport->GetGenericParamConstraintProps(constraints[i], &owner, &constraint); if (owner != tkParam) VWrite("%08x (owner: %08x) ", constraint, owner); else VWrite("%08x ", constraint); } VWriteLine(""); } m_pImport->CloseEnum(constraintEnum); sprintf_s(newprefix, 30, "%s\t", prefix); DisplayCustomAttributes(tkParam, newprefix); } LPCWSTR MDInfo::TokenName(mdToken inToken, _Out_writes_(bufLen) LPWSTR buffer, ULONG bufLen) { LPCUTF8 pName; // Token name in UTF8. 
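    // The name comes back as a raw UTF-8 pointer; it is converted into the
    // caller-supplied wide-character buffer below.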
if (IsNilToken(inToken)) return W(""); m_pImport->GetNameFromToken(inToken, &pName); WszMultiByteToWideChar(CP_UTF8,0, pName,-1, buffer,bufLen); return buffer; } // LPCWSTR MDInfo::TokenName() // prints out name of typeref or typedef // LPCWSTR MDInfo::TypeDeforRefName(mdToken inToken, _Out_writes_(bufLen) LPWSTR buffer, ULONG bufLen) { if (RidFromToken(inToken)) { if (TypeFromToken(inToken) == mdtTypeDef) return (TypeDefName((mdTypeDef) inToken, buffer, bufLen)); else if (TypeFromToken(inToken) == mdtTypeRef) return (TypeRefName((mdTypeRef) inToken, buffer, bufLen)); else if (TypeFromToken(inToken) == mdtTypeSpec) return W("[TypeSpec]"); else return W("[InvalidReference]"); } else return W(""); } // LPCWSTR MDInfo::TypeDeforRefName() LPCWSTR MDInfo::MemberDeforRefName(mdToken inToken, _Out_writes_(bufLen) LPWSTR buffer, ULONG bufLen) { if (RidFromToken(inToken)) { if (TypeFromToken(inToken) == mdtMethodDef || TypeFromToken(inToken) == mdtFieldDef) return (MemberName(inToken, buffer, bufLen)); else if (TypeFromToken(inToken) == mdtMemberRef) return (MemberRefName((mdMemberRef) inToken, buffer, bufLen)); else return W("[InvalidReference]"); } else return W(""); } // LPCWSTR MDInfo::MemberDeforRefName() // prints out only the name of the given typedef // // LPCWSTR MDInfo::TypeDefName(mdTypeDef inTypeDef, _Out_writes_(bufLen) LPWSTR buffer, ULONG bufLen) { HRESULT hr; hr = m_pImport->GetTypeDefProps( // [IN] The import scope. inTypeDef, // [IN] TypeDef token for inquiry. buffer, // [OUT] Put name here. bufLen, // [IN] size of name buffer in wide chars. NULL, // [OUT] put size of name (wide chars) here. NULL, // [OUT] Put flags here. NULL); // [OUT] Put base class TypeDef/TypeRef here. if (FAILED(hr)) { swprintf_s(buffer, bufLen, W("[Invalid TypeDef]")); } return buffer; } // LPCWSTR MDInfo::TypeDefName() // prints out all the properties of a given typedef // void MDInfo::DisplayTypeDefProps(mdTypeDef inTypeDef) { HRESULT hr; WCHAR typeDefName[STRING_BUFFER_LEN]; ULONG nameLen; DWORD flags; mdToken extends; ULONG dwPacking; // Packing size of class, if specified. ULONG dwSize; // Total size of class, if specified. hr = m_pImport->GetTypeDefProps( inTypeDef, // [IN] TypeDef token for inquiry. typeDefName, // [OUT] Put name here. STRING_BUFFER_LEN, // [IN] size of name buffer in wide chars. &nameLen, // [OUT] put size of name (wide chars) here. &flags, // [OUT] Put flags here. &extends); // [OUT] Put base class TypeDef/TypeRef here. 
if (FAILED(hr)) Error("GetTypeDefProps failed.", hr); char sFlags[STRING_BUFFER_LEN]; WCHAR szTempBuf[STRING_BUFFER_LEN]; VWriteLine("\tTypDefName: %ls (%8.8X)",typeDefName,inTypeDef); VWriteLine("\tFlags : %s (%08x)",ClassFlags(flags, sFlags), flags); VWriteLine("\tExtends : %8.8X [%s] %ls",extends,TokenTypeName(extends), TypeDeforRefName(extends, szTempBuf, ARRAY_SIZE(szTempBuf))); hr = m_pImport->GetClassLayout(inTypeDef, &dwPacking, 0,0,0, &dwSize); if (hr == S_OK) VWriteLine("\tLayout : Packing:%d, Size:%d", dwPacking, dwSize); if (IsTdNested(flags)) { mdTypeDef tkEnclosingClass; hr = m_pImport->GetNestedClassProps(inTypeDef, &tkEnclosingClass); if (hr == S_OK) { VWriteLine("\tEnclosingClass : %ls (%8.8X)", TypeDeforRefName(tkEnclosingClass, szTempBuf, ARRAY_SIZE(szTempBuf)), tkEnclosingClass); } else if (hr == CLDB_E_RECORD_NOTFOUND) WriteLine("ERROR: EnclosingClass not found for NestedClass"); else Error("GetNestedClassProps failed.", hr); } } // void MDInfo::DisplayTypeDefProps() // Prints out the name of the given TypeRef // LPCWSTR MDInfo::TypeRefName(mdTypeRef tr, _Out_writes_(bufLen) LPWSTR buffer, ULONG bufLen) { HRESULT hr; hr = m_pImport->GetTypeRefProps( tr, // The class ref token. NULL, // Resolution scope. buffer, // Put the name here. bufLen, // Size of the name buffer, wide chars. NULL); // Put actual size of name here. if (FAILED(hr)) { swprintf_s(buffer, bufLen, W("[Invalid TypeRef]")); } return (buffer); } // LPCWSTR MDInfo::TypeRefName() // Prints out all the info of the given TypeRef // void MDInfo::DisplayTypeRefInfo(mdTypeRef tr) { HRESULT hr; mdToken tkResolutionScope; WCHAR typeRefName[STRING_BUFFER_LEN]; ULONG nameLen; hr = m_pImport->GetTypeRefProps( tr, // The class ref token. &tkResolutionScope, // ResolutionScope. typeRefName, // Put the name here. STRING_BUFFER_LEN, // Size of the name buffer, wide chars. &nameLen); // Put actual size of name here. if (FAILED(hr)) Error("GetTypeRefProps failed.", hr); VWriteLine("Token: 0x%08x", tr); VWriteLine("ResolutionScope: 0x%08x", tkResolutionScope); VWriteLine("TypeRefName: %ls",typeRefName); DisplayCustomAttributes(tr, "\t"); } // void MDInfo::DisplayTypeRefInfo() void MDInfo::DisplayTypeSpecInfo(mdTypeSpec ts, const char *preFix) { HRESULT hr; PCCOR_SIGNATURE pvSig; ULONG cbSig; ULONG cb; InitSigBuffer(); hr = m_pImport->GetTypeSpecFromToken( ts, // The class ref token. &pvSig, &cbSig); if (FAILED(hr)) Error("GetTypeSpecFromToken failed.", hr); // DisplaySignature(pvSig, cbSig, preFix); if (FAILED(hr = GetOneElementType(pvSig, cbSig, &cb))) goto ErrExit; VWriteLine("%s\tTypeSpec :%s", preFix, (LPSTR)m_sigBuf.Ptr()); // Hex, too? if (m_DumpFilter & dumpMoreHex) { char rcNewPrefix[80]; sprintf_s(rcNewPrefix, 80, "%s\tSignature", preFix); DumpHex(rcNewPrefix, pvSig, cbSig, false, 24); } ErrExit: return; } // void MDInfo::DisplayTypeSpecInfo() void MDInfo::DisplayMethodSpecInfo(mdMethodSpec ms, const char *preFix) { HRESULT hr; PCCOR_SIGNATURE pvSig; ULONG cbSig; mdToken tk; InitSigBuffer(); hr = m_pImport->GetMethodSpecProps( ms, // The MethodSpec token &tk, // The MethodDef or MemberRef &pvSig, // Signature. &cbSig); // Size of signature. VWriteLine("%s\tParent : 0x%08x", preFix, tk); DisplaySignature(pvSig, cbSig, preFix); //ErrExit: return; } // void MDInfo::DisplayMethodSpecInfo() // Return the passed-in buffer filled with a string detailing the class flags // associated with the class. 
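// Like the other flag helpers in this file it relies on the ISFLAG macro, which
// appends one bracketed name per bit that is set and falls back to "[none]",
// so a plain public class typically renders as something like
// "[Public] [AutoLayout] [Class] [AnsiClass] ...".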
// char *MDInfo::ClassFlags(DWORD flags, _Out_writes_(STRING_BUFFER_LEN) char *sFlags) { sFlags[0] = 0; ISFLAG(Td, NotPublic); ISFLAG(Td, Public); ISFLAG(Td, NestedPublic); ISFLAG(Td, NestedPrivate); ISFLAG(Td, NestedFamily); ISFLAG(Td, NestedAssembly); ISFLAG(Td, NestedFamANDAssem); ISFLAG(Td, NestedFamORAssem); ISFLAG(Td, AutoLayout); ISFLAG(Td, SequentialLayout); ISFLAG(Td, ExplicitLayout); ISFLAG(Td, Class); ISFLAG(Td, Interface); ISFLAG(Td, Abstract); ISFLAG(Td, Sealed); ISFLAG(Td, SpecialName); ISFLAG(Td, Import); ISFLAG(Td, Serializable); ISFLAG(Td, AnsiClass); ISFLAG(Td, UnicodeClass); ISFLAG(Td, AutoClass); ISFLAG(Td, BeforeFieldInit); ISFLAG(Td, Forwarder); // "Reserved" flags ISFLAG(Td, RTSpecialName); ISFLAG(Td, HasSecurity); ISFLAG(Td, WindowsRuntime); if (!*sFlags) strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]"); return sFlags; } // char *MDInfo::ClassFlags() // prints out all info on the given typeDef, including all information that // is specific to a given typedef // void MDInfo::DisplayTypeDefInfo(mdTypeDef inTypeDef) { DisplayTypeDefProps(inTypeDef); // Get field layout information. HRESULT hr = NOERROR; COR_FIELD_OFFSET *rFieldOffset = NULL; ULONG cFieldOffset = 0; hr = m_pImport->GetClassLayout(inTypeDef, NULL, rFieldOffset, 0, &cFieldOffset, NULL); if (SUCCEEDED(hr) && cFieldOffset) { rFieldOffset = new COR_FIELD_OFFSET[cFieldOffset]; if (rFieldOffset == NULL) Error("_calloc failed.", E_OUTOFMEMORY); hr = m_pImport->GetClassLayout(inTypeDef, NULL, rFieldOffset, cFieldOffset, &cFieldOffset, NULL); if (FAILED(hr)) { delete [] rFieldOffset; Error("GetClassLayout() failed.", hr); } } //No reason to display members if we're displaying fields and methods separately DisplayGenericParams(inTypeDef, "\t"); DisplayFields(inTypeDef, rFieldOffset, cFieldOffset); delete [] rFieldOffset; DisplayMethods(inTypeDef); DisplayProperties(inTypeDef); DisplayEvents(inTypeDef); DisplayMethodImpls(inTypeDef); DisplayPermissions(inTypeDef, ""); DisplayInterfaceImpls(inTypeDef); DisplayCustomAttributes(inTypeDef, "\t"); } // void MDInfo::DisplayTypeDefInfo() // print out information about every the given typeDef's interfaceImpls // void MDInfo::DisplayInterfaceImpls(mdTypeDef inTypeDef) { HCORENUM interfaceImplEnum = NULL; mdTypeRef interfaceImpls[ENUM_BUFFER_SIZE]; ULONG count, totalCount = 1; HRESULT hr; while(SUCCEEDED(hr = m_pImport->EnumInterfaceImpls( &interfaceImplEnum, inTypeDef,interfaceImpls,ARRAY_SIZE(interfaceImpls), &count)) && count > 0) { for (ULONG i = 0; i < count; i++, totalCount++) { VWriteLine("\tInterfaceImpl #%d (%08x)", totalCount, interfaceImpls[i]); WriteLine("\t-------------------------------------------------------"); DisplayInterfaceImplInfo(interfaceImpls[i]); DisplayPermissions(interfaceImpls[i], "\t"); WriteLine(""); } } m_pImport->CloseEnum( interfaceImplEnum); } // void MDInfo::DisplayInterfaceImpls() // print the information for the given interface implementation // void MDInfo::DisplayInterfaceImplInfo(mdInterfaceImpl inImpl) { mdTypeDef typeDef; mdToken token; HRESULT hr; WCHAR szTempBuf[STRING_BUFFER_LEN]; hr = m_pImport->GetInterfaceImplProps( inImpl, &typeDef, &token); if (FAILED(hr)) Error("GetInterfaceImplProps failed.", hr); VWriteLine("\t\tClass : %ls",TypeDeforRefName(typeDef, szTempBuf, ARRAY_SIZE(szTempBuf))); VWriteLine("\t\tToken : %8.8X [%s] %ls",token,TokenTypeName(token), TypeDeforRefName(token, szTempBuf, ARRAY_SIZE(szTempBuf))); DisplayCustomAttributes(inImpl, "\t\t"); } // void MDInfo::DisplayInterfaceImplInfo() // displays the information 
for a particular property // void MDInfo::DisplayPropertyInfo(mdProperty inProp) { HRESULT hr; mdTypeDef typeDef; WCHAR propName[STRING_BUFFER_LEN]; DWORD flags; #ifdef FEATURE_COMINTEROP VARIANT defaultValue; #endif void const *pValue; ULONG cbValue; DWORD dwCPlusTypeFlag; mdMethodDef setter, getter, otherMethod[ENUM_BUFFER_SIZE]; ULONG others; PCCOR_SIGNATURE pbSigBlob; ULONG ulSigBlob; #ifdef FEATURE_COMINTEROP ::VariantInit(&defaultValue); #endif hr = m_pImport->GetPropertyProps( inProp, // [IN] property token &typeDef, // [OUT] typedef containing the property declarion. propName, // [OUT] Property name STRING_BUFFER_LEN, // [IN] the count of wchar of szProperty NULL, // [OUT] actual count of wchar for property name &flags, // [OUT] property flags. &pbSigBlob, // [OUT] Signature Blob. &ulSigBlob, // [OUT] Number of bytes in the signature blob. &dwCPlusTypeFlag, // [OUT] default value &pValue, &cbValue, &setter, // [OUT] setter method of the property &getter, // [OUT] getter method of the property otherMethod, // [OUT] other methods of the property ENUM_BUFFER_SIZE, // [IN] size of rmdOtherMethod &others); // [OUT] total number of other method of this property if (FAILED(hr)) Error("GetPropertyProps failed.", hr); VWriteLine("\t\tProp.Name : %ls (%8.8X)",propName,inProp); char sFlags[STRING_BUFFER_LEN]; sFlags[0] = 0; ISFLAG(Pr, SpecialName); ISFLAG(Pr, RTSpecialName); ISFLAG(Pr, HasDefault); if (!*sFlags) strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]"); VWriteLine("\t\tFlags : %s (%08x)", sFlags, flags); if (ulSigBlob) DisplaySignature(pbSigBlob, ulSigBlob, ""); else VWriteLine("\t\tERROR: no valid signature "); WCHAR szTempBuf[STRING_BUFFER_LEN]; #ifdef FEATURE_COMINTEROP _FillVariant((BYTE)dwCPlusTypeFlag, pValue, cbValue, &defaultValue); VWriteLine("\t\tDefltValue: %ls",VariantAsString(&defaultValue)); #endif VWriteLine("\t\tSetter : (%08x) %ls",setter,MemberDeforRefName(setter, szTempBuf, ARRAY_SIZE(szTempBuf))); VWriteLine("\t\tGetter : (%08x) %ls",getter,MemberDeforRefName(getter, szTempBuf, ARRAY_SIZE(szTempBuf))); // do something with others? VWriteLine("\t\t%ld Others",others); DisplayCustomAttributes(inProp, "\t\t"); #ifdef FEATURE_COMINTEROP ::VariantClear(&defaultValue); #endif } // void MDInfo::DisplayPropertyInfo() // displays info for each property // void MDInfo::DisplayProperties(mdTypeDef inTypeDef) { HCORENUM propEnum = NULL; mdProperty props[ENUM_BUFFER_SIZE]; ULONG count, totalCount = 1; HRESULT hr; while(SUCCEEDED(hr = m_pImport->EnumProperties( &propEnum, inTypeDef,props,ARRAY_SIZE(props), &count)) && count > 0) { for (ULONG i = 0; i < count; i++, totalCount++) { VWriteLine("\tProperty #%d (%08x)", totalCount, props[i]); WriteLine("\t-------------------------------------------------------"); DisplayPropertyInfo(props[i]); DisplayPermissions(props[i], "\t"); WriteLine(""); } } m_pImport->CloseEnum( propEnum); } // void MDInfo::DisplayProperties() // Display all information about a particular event // void MDInfo::DisplayEventInfo(mdEvent inEvent) { HRESULT hr; mdTypeDef typeDef; WCHAR eventName[STRING_BUFFER_LEN]; DWORD flags; mdToken eventType; mdMethodDef addOn, removeOn, fire, otherMethod[ENUM_BUFFER_SIZE]; ULONG totalOther; hr = m_pImport->GetEventProps( // [IN] The scope. inEvent, // [IN] event token &typeDef, // [OUT] typedef containing the event declarion. eventName, // [OUT] Event name STRING_BUFFER_LEN, // [IN] the count of wchar of szEvent NULL, // [OUT] actual count of wchar for event's name &flags, // [OUT] Event flags. 
&eventType, // [OUT] EventType class &addOn, // [OUT] AddOn method of the event &removeOn, // [OUT] RemoveOn method of the event &fire, // [OUT] Fire method of the event otherMethod, // [OUT] other method of the event ARRAY_SIZE(otherMethod), // [IN] size of rmdOtherMethod &totalOther); // [OUT] total number of other method of this event if (FAILED(hr)) Error("GetEventProps failed.", hr); VWriteLine("\t\tName : %ls (%8.8X)",eventName,inEvent); char sFlags[STRING_BUFFER_LEN]; sFlags[0] = 0; ISFLAG(Ev, SpecialName); ISFLAG(Ev, RTSpecialName); if (!*sFlags) strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]"); VWriteLine("\t\tFlags : %s (%08x)", sFlags, flags); WCHAR szTempBuf[STRING_BUFFER_LEN]; VWriteLine("\t\tEventType : %8.8X [%s]",eventType,TokenTypeName(eventType)); VWriteLine("\t\tAddOnMethd: (%08x) %ls",addOn,MemberDeforRefName(addOn, szTempBuf, ARRAY_SIZE(szTempBuf))); VWriteLine("\t\tRmvOnMethd: (%08x) %ls",removeOn,MemberDeforRefName(removeOn, szTempBuf, ARRAY_SIZE(szTempBuf))); VWriteLine("\t\tFireMethod: (%08x) %ls",fire,MemberDeforRefName(fire, szTempBuf, ARRAY_SIZE(szTempBuf))); VWriteLine("\t\t%ld OtherMethods",totalOther); DisplayCustomAttributes(inEvent, "\t\t"); } // void MDInfo::DisplayEventInfo() // Display information about all events in a typedef // void MDInfo::DisplayEvents(mdTypeDef inTypeDef) { HCORENUM eventEnum = NULL; mdProperty events[ENUM_BUFFER_SIZE]; ULONG count, totalCount = 1; HRESULT hr; while(SUCCEEDED(hr = m_pImport->EnumEvents( &eventEnum, inTypeDef,events,ARRAY_SIZE(events), &count)) && count > 0) { for (ULONG i = 0; i < count; i++, totalCount++) { VWriteLine("\tEvent #%d (%08x)", totalCount, events[i]); WriteLine("\t-------------------------------------------------------"); DisplayEventInfo(events[i]); DisplayPermissions(events[i], "\t"); WriteLine(""); } } m_pImport->CloseEnum( eventEnum); } // void MDInfo::DisplayEvents() // print info for the passed-in custom attribute // This function is used to print the custom attribute information for both TypeDefs and // MethodDefs which need slightly different formatting. preFix helps fix it up. // void MDInfo::DisplayCustomAttributeInfo(mdCustomAttribute inValue, const char *preFix) { const BYTE *pValue; // The custom value. ULONG cbValue; // Length of the custom value. HRESULT hr; // A result. mdToken tkObj; // Attributed object. mdToken tkType; // Type of the custom attribute. mdToken tk; // For name lookup. LPCUTF8 pMethName=0; // Name of custom attribute ctor, if any. CQuickBytes qSigName; // Buffer to pretty-print signature. PCCOR_SIGNATURE pSig=0; // Signature of ctor. ULONG cbSig; // Size of the signature. BOOL bCoffSymbol = false; // true for coff symbol CA's. WCHAR rcName[MAX_CLASS_NAME]; // Name of the type. hr = m_pImport->GetCustomAttributeProps( // S_OK or error. inValue, // The attribute. &tkObj, // The attributed object &tkType, // The attributes type. (const void**)&pValue, // Put pointer to data here. &cbValue); // Put size here. if (FAILED(hr)) Error("GetCustomAttributeProps failed.", hr); VWriteLine("%s\tCustomAttribute Type: %08x", preFix, tkType); // Get the name of the memberref or methoddef. tk = tkType; rcName[0] = L'\0'; // Get the member name, and the parent token. 
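    // The attribute's "type" token is really its constructor: either a MemberRef
    // or a MethodDef.  Grab the ctor's name and signature first, then walk up to
    // the owning TypeDef/TypeRef to recover the attribute's type name.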
switch (TypeFromToken(tk)) { case mdtMemberRef: hr = m_pImport->GetNameFromToken(tk, &pMethName); if (FAILED(hr)) Error("GetNameFromToken failed.", hr); hr = m_pImport->GetMemberRefProps( tk, &tk, 0, 0, 0, &pSig, &cbSig); if (FAILED(hr)) Error("GetMemberRefProps failed.", hr); break; case mdtMethodDef: hr = m_pImport->GetNameFromToken(tk, &pMethName); if (FAILED(hr)) Error("GetNameFromToken failed.", hr); hr = m_pImport->GetMethodProps(tk, &tk, 0, 0, 0, 0, &pSig, &cbSig, 0, 0); if (FAILED(hr)) Error("GetMethodProps failed.", hr); break; } // switch // Get the type name. switch (TypeFromToken(tk)) { case mdtTypeDef: hr = m_pImport->GetTypeDefProps(tk, rcName,MAX_CLASS_NAME,0, 0,0); if (FAILED(hr)) Error("GetTypeDefProps failed.", hr); break; case mdtTypeRef: hr = m_pImport->GetTypeRefProps(tk, 0, rcName,MAX_CLASS_NAME,0); if (FAILED(hr)) Error("GetTypeRefProps failed.", hr); break; } // switch if (pSig && pMethName) { int iLen; LPWSTR pwzName = (LPWSTR)(new WCHAR[iLen= 1+(ULONG32)strlen(pMethName)]); if(pwzName) { WszMultiByteToWideChar(CP_UTF8,0, pMethName,-1, pwzName,iLen); PrettyPrintSigLegacy(pSig, cbSig, pwzName, &qSigName, m_pImport); delete [] pwzName; } } VWrite("%s\tCustomAttributeName: %ls", preFix, rcName); if (pSig && pMethName) VWrite(" :: %S", qSigName.Ptr()); // Keep track of coff overhead. if (!wcscmp(W("__DecoratedName"), rcName)) { bCoffSymbol = true; g_cbCoffNames += cbValue + 6; } WriteLine(""); VWriteLine("%s\tLength: %ld", preFix, cbValue); char newPreFix[40]; sprintf_s(newPreFix, 40, "%s\tValue ", preFix); DumpHex(newPreFix, pValue, cbValue); if (bCoffSymbol) VWriteLine("%s\t %s", preFix, pValue); // Try to decode the constructor blob. This is incomplete, but covers the most popular cases. if (pSig) { // Interpret the signature. PCCOR_SIGNATURE ps = pSig; ULONG cb; ULONG ulData; ULONG cParams; ULONG ulVal; UINT8 u1 = 0; UINT16 u2 = 0; UINT32 u4 = 0; UINT64 u8 = 0; unsigned __int64 uI64; double dblVal; ULONG cbVal; LPCUTF8 pStr; CustomAttributeParser CA(pValue, cbValue); CA.ValidateProlog(); // Get the calling convention. cb = CorSigUncompressData(ps, &ulData); ps += cb; // Get the count of params. cb = CorSigUncompressData(ps, &cParams); ps += cb; // Get the return value. cb = CorSigUncompressData(ps, &ulData); ps += cb; if (ulData == ELEMENT_TYPE_VOID) { VWrite("%s\tctor args: (", preFix); // For each param... for (ULONG i=0; i<cParams; ++i) { // Get the next param type. cb = CorSigUncompressData(ps, &ulData); ps += cb; if (i) Write(", "); DoObject: switch (ulData) { // For ET_OBJECT, the next byte in the blob is the ET of the actual data. case ELEMENT_TYPE_OBJECT: CA.GetU1(&u1); ulData = u1; goto DoObject; case ELEMENT_TYPE_I1: case ELEMENT_TYPE_U1: CA.GetU1(&u1); ulVal = u1; goto PrintVal; case ELEMENT_TYPE_I2: case ELEMENT_TYPE_U2: CA.GetU2(&u2); ulVal = u2; goto PrintVal; case ELEMENT_TYPE_I4: case ELEMENT_TYPE_U4: CA.GetU4(&u4); ulVal = u4; PrintVal: VWrite("%d", ulVal); break; case ELEMENT_TYPE_STRING: CA.GetString(&pStr, &cbVal); VWrite("\"%s\"", pStr); break; // The only class type that we accept is Type, which is stored as a string. case ELEMENT_TYPE_CLASS: // Eat the class type. cb = CorSigUncompressData(ps, &ulData); ps += cb; // Get the name of the type. 
CA.GetString(&pStr, &cbVal); VWrite("typeof(%s)", pStr); break; case SERIALIZATION_TYPE_TYPE: CA.GetString(&pStr, &cbVal); VWrite("typeof(%s)", pStr); break; case ELEMENT_TYPE_I8: case ELEMENT_TYPE_U8: CA.GetU8(&u8); uI64 = u8; VWrite("%#lx", uI64); break; case ELEMENT_TYPE_R4: dblVal = CA.GetR4(); VWrite("%f", dblVal); break; case ELEMENT_TYPE_R8: dblVal = CA.GetR8(); VWrite("%f", dblVal); break; default: // bail... i = cParams; Write(" <can not decode> "); break; } } WriteLine(")"); } } WriteLine(""); } // void MDInfo::DisplayCustomAttributeInfo() // Print all custom values for the given token // This function is used to print the custom value information for all tokens. // which need slightly different formatting. preFix helps fix it up. // void MDInfo::DisplayCustomAttributes(mdToken inToken, const char *preFix) { HCORENUM customAttributeEnum = NULL; mdTypeRef customAttributes[ENUM_BUFFER_SIZE]; ULONG count, totalCount = 1; HRESULT hr; while(SUCCEEDED(hr = m_pImport->EnumCustomAttributes( &customAttributeEnum, inToken, 0, customAttributes, ARRAY_SIZE(customAttributes), &count)) && count > 0) { for (ULONG i = 0; i < count; i++, totalCount++) { VWriteLine("%sCustomAttribute #%d (%08x)", preFix, totalCount, customAttributes[i]); VWriteLine("%s-------------------------------------------------------", preFix); DisplayCustomAttributeInfo(customAttributes[i], preFix); } } m_pImport->CloseEnum( customAttributeEnum); } // void MDInfo::DisplayCustomAttributes() // Show the passed-in token's permissions // // void MDInfo::DisplayPermissions(mdToken tk, const char *preFix) { HCORENUM permissionEnum = NULL; mdPermission permissions[ENUM_BUFFER_SIZE]; ULONG count, totalCount = 1; HRESULT hr; while (SUCCEEDED(hr = m_pImport->EnumPermissionSets( &permissionEnum, tk, 0, permissions, ARRAY_SIZE(permissions), &count)) && count > 0) { for (ULONG i = 0; i < count; i++, totalCount++) { VWriteLine("%s\tPermission #%d (%08x)", preFix, totalCount, permissions[i]); VWriteLine("%s\t-------------------------------------------------------", preFix); DisplayPermissionInfo(permissions[i], preFix); WriteLine(""); } } m_pImport->CloseEnum( permissionEnum); } // void MDInfo::DisplayPermissions() // print properties of given rolecheck // // void MDInfo::DisplayPermissionInfo(mdPermission inPermission, const char *preFix) { DWORD dwAction; const BYTE *pvPermission; ULONG cbPermission; const char *flagDesc = NULL; char newPreFix[STRING_BUFFER_LEN]; HRESULT hr; hr = m_pImport->GetPermissionSetProps( inPermission, &dwAction, (const void**)&pvPermission, &cbPermission); if (FAILED(hr)) Error("GetPermissionSetProps failed.", hr); switch(dwAction) { case dclActionNil: flagDesc = "ActionNil"; break; case dclRequest: flagDesc = "Request"; break; case dclDemand: flagDesc = "Demand"; break; case dclAssert: flagDesc = "Assert"; break; case dclDeny: flagDesc = "Deny"; break; case dclPermitOnly: flagDesc = "PermitOnly"; break; case dclLinktimeCheck: flagDesc = "LinktimeCheck"; break; case dclInheritanceCheck: flagDesc = "InheritanceCheck"; break; case dclRequestMinimum: flagDesc = "RequestMinimum"; break; case dclRequestOptional: flagDesc = "RequestOptional"; break; case dclRequestRefuse: flagDesc = "RequestRefuse"; break; case dclPrejitGrant: flagDesc = "PrejitGrant"; break; case dclPrejitDenied: flagDesc = "PrejitDenied"; break; case dclNonCasDemand: flagDesc = "NonCasDemand"; break; case dclNonCasLinkDemand: flagDesc = "NonCasLinkDemand"; break; case dclNonCasInheritance: flagDesc = "NonCasInheritance"; break; } 
VWriteLine("%s\t\tAction : %s", preFix, flagDesc); VWriteLine("%s\t\tBlobLen : %d", preFix, cbPermission); if (cbPermission) { sprintf_s(newPreFix, STRING_BUFFER_LEN, "%s\tBlob", preFix); DumpHex(newPreFix, pvPermission, cbPermission, false, 24); } sprintf_s (newPreFix, STRING_BUFFER_LEN, "\t\t%s", preFix); DisplayCustomAttributes(inPermission, newPreFix); } // void MDInfo::DisplayPermissionInfo() // simply prints out the given GUID in standard form LPWSTR MDInfo::GUIDAsString(GUID inGuid, _Out_writes_(bufLen) LPWSTR guidString, ULONG bufLen) { StringFromGUID2(inGuid, guidString, bufLen); return guidString; } // LPWSTR MDInfo::GUIDAsString() #ifdef FEATURE_COMINTEROP LPCWSTR MDInfo::VariantAsString(VARIANT *pVariant) { HRESULT hr = S_OK; if (V_VT(pVariant) == VT_UNKNOWN) { _ASSERTE(V_UNKNOWN(pVariant) == NULL); return W("<NULL>"); } else if (SUCCEEDED(hr = ::VariantChangeType(pVariant, pVariant, 0, VT_BSTR))) return V_BSTR(pVariant); else if (hr == DISP_E_BADVARTYPE && V_VT(pVariant) == VT_I8) { // allocate the bstr. char szStr[32]; WCHAR wszStr[32]; // Set variant type to bstr. V_VT(pVariant) = VT_BSTR; // Create the ansi string. sprintf_s(szStr, 32, "%I64d", V_CY(pVariant).int64); // Convert to unicode. WszMultiByteToWideChar(CP_ACP, 0, szStr, -1, wszStr, 32); // convert to bstr and set variant value. V_BSTR(pVariant) = ::SysAllocString(wszStr); if (V_BSTR(pVariant) == NULL) Error("SysAllocString() failed.", E_OUTOFMEMORY); return V_BSTR(pVariant); } else return W("ERROR"); } // LPWSTR MDInfo::VariantAsString() #endif bool TrySigUncompress(PCCOR_SIGNATURE pData, // [IN] compressed data ULONG *pDataOut, // [OUT] the expanded *pData ULONG *cbCur) { ULONG ulSize = CorSigUncompressData(pData, pDataOut); if (ulSize == (ULONG)-1) { *cbCur = ulSize; return false; } else { *cbCur += ulSize; return true; } } void MDInfo::DisplayFieldMarshal(mdToken inToken) { PCCOR_SIGNATURE pvNativeType; // [OUT] native type of this field ULONG cbNativeType; // [OUT] the count of bytes of *ppvNativeType HRESULT hr; hr = m_pImport->GetFieldMarshal( inToken, &pvNativeType, &cbNativeType); if (FAILED(hr) && hr != CLDB_E_RECORD_NOTFOUND) Error("GetFieldMarshal failed.", hr); if (hr != CLDB_E_RECORD_NOTFOUND) { ULONG cbCur = 0; ULONG ulData; ULONG ulStrLoc; char szNTDesc[STRING_BUFFER_LEN]; while (cbCur < cbNativeType) { ulStrLoc = 0; ulData = NATIVE_TYPE_MAX; if (!TrySigUncompress(&pvNativeType[cbCur], &ulData, &cbCur)) continue; if (ulData >= sizeof(g_szNativeType)/sizeof(*g_szNativeType)) { cbCur = (ULONG)-1; continue; } ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "%s ", g_szNativeType[ulData]); switch (ulData) { case NATIVE_TYPE_FIXEDSYSSTRING: { if (cbCur < cbNativeType) { if (!TrySigUncompress(&pvNativeType[cbCur], &ulData, &cbCur)) continue; ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "{StringElementCount: %d} ",ulData); } } break; case NATIVE_TYPE_FIXEDARRAY: { if (cbCur < cbNativeType) { if (!TrySigUncompress(&pvNativeType[cbCur], &ulData, &cbCur)) continue; ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "{ArrayElementCount: %d",ulData); if (cbCur < cbNativeType) { if (!TrySigUncompress(&pvNativeType[cbCur], &ulData, &cbCur)) continue; ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, ", ArrayElementType(NT): %d",ulData); } ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc,"}"); } } break; case NATIVE_TYPE_ARRAY: { if (cbCur < cbNativeType) { BOOL bElemTypeSpecified; if 
(!TrySigUncompress(&pvNativeType[cbCur], &ulData, &cbCur)) continue; if (ulData != NATIVE_TYPE_MAX) { ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "{ArrayElementType(NT): %d", ulData); bElemTypeSpecified = TRUE; } else { ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "{"); bElemTypeSpecified = FALSE; } if (cbCur < cbNativeType) { if (bElemTypeSpecified) ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, ", "); if (!TrySigUncompress(&pvNativeType[cbCur], &ulData, &cbCur)) continue; ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "SizeParamIndex: %d",ulData); if (cbCur < cbNativeType) { if (!TrySigUncompress(&pvNativeType[cbCur], &ulData, &cbCur)) continue; ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, ", SizeParamMultiplier: %d",ulData); if (cbCur < cbNativeType) { if (!TrySigUncompress(&pvNativeType[cbCur], &ulData, &cbCur)) continue; ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, ", SizeConst: %d",ulData); } } } ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "}"); } } break; case NATIVE_TYPE_SAFEARRAY: { if (cbCur < cbNativeType) { if (!TrySigUncompress(&pvNativeType[cbCur], &ulData, &cbCur)) continue; ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "{SafeArraySubType(VT): %d, ",ulData); // Extract the element type name if it is specified. if (cbCur < cbNativeType) { LPUTF8 strTemp = NULL; int strLen = 0; int ByteCountLength = 0; strLen = CPackedLen::GetLength(&pvNativeType[cbCur], &ByteCountLength); cbCur += ByteCountLength; strTemp = (LPUTF8)(new char[strLen + 1]); if(strTemp) { memcpy(strTemp, (LPUTF8)&pvNativeType[cbCur], strLen); strTemp[strLen] = 0; ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "ElementTypeName: %s}", strTemp); cbCur += strLen; _ASSERTE(cbCur == cbNativeType); delete [] strTemp; } } else { ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "ElementTypeName: }"); } } } break; case NATIVE_TYPE_CUSTOMMARSHALER: { LPUTF8 strTemp = NULL; int strLen = 0; int ByteCountLength = 0; // Extract the typelib GUID. strLen = CPackedLen::GetLength(&pvNativeType[cbCur], &ByteCountLength); cbCur += ByteCountLength; strTemp = (LPUTF8)(new char[strLen + 1]); if(strTemp) { memcpy(strTemp, (LPUTF8)&pvNativeType[cbCur], strLen); strTemp[strLen] = 0; ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "{Typelib: %s, ", strTemp); cbCur += strLen; _ASSERTE(cbCur < cbNativeType); delete [] strTemp; } // Extract the name of the native type. strLen = CPackedLen::GetLength(&pvNativeType[cbCur], &ByteCountLength); cbCur += ByteCountLength; strTemp = (LPUTF8)(new char[strLen + 1]); if(strTemp) { memcpy(strTemp, (LPUTF8)&pvNativeType[cbCur], strLen); strTemp[strLen] = 0; ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "Native: %s, ", strTemp); cbCur += strLen; _ASSERTE(cbCur < cbNativeType); delete [] strTemp; } // Extract the name of the custom marshaler. strLen = CPackedLen::GetLength(&pvNativeType[cbCur], &ByteCountLength); cbCur += ByteCountLength; strTemp = (LPUTF8)(new char[strLen + 1]); if(strTemp) { memcpy(strTemp, (LPUTF8)&pvNativeType[cbCur], strLen); strTemp[strLen] = 0; ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "Marshaler: %s, ", strTemp); cbCur += strLen; _ASSERTE(cbCur < cbNativeType); delete [] strTemp; } // Extract the cookie string. 
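                // (The custom marshaler blob is four length-prefixed strings in a
                //  row: typelib guid, native type name, marshaler type name, and
                //  finally this cookie.)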
strLen = CPackedLen::GetLength(&pvNativeType[cbCur], &ByteCountLength); cbCur += ByteCountLength; if (strLen > 0) { strTemp = (LPUTF8)(new char[strLen + 1]); if(strTemp) { memcpy(strTemp, (LPUTF8)&pvNativeType[cbCur], strLen); strTemp[strLen] = 0; ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "Cookie: "); // Copy the cookie string and transform the embedded nulls into \0's. for (int i = 0; i < strLen - 1; i++, cbCur++) { if (strTemp[i] == 0) ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "\\0"); else szNTDesc[ulStrLoc++] = strTemp[i]; } szNTDesc[ulStrLoc++] = strTemp[strLen - 1]; cbCur++; delete [] strTemp; } } else { ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "Cookie: "); } // Finish the custom marshaler native type description. ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "}"); _ASSERTE(cbCur <= cbNativeType); } break; default: { // normal nativetype element: do nothing } } VWriteLine("\t\t\t\t%s",szNTDesc); if (ulData >= NATIVE_TYPE_MAX) break; } if (cbCur == (ULONG)-1) { // There was something that we didn't grok in the signature. // Just dump out the blob as hex VWrite("\t\t\t\t{", szNTDesc); while (cbNativeType--) VWrite(" %2.2X", *pvNativeType++); VWriteLine(" }"); } } } // void MDInfo::DisplayFieldMarshal() void MDInfo::DisplayPinvokeInfo(mdToken inToken) { HRESULT hr = NOERROR; DWORD flags; WCHAR rcImport[512]; mdModuleRef tkModuleRef; char sFlags[STRING_BUFFER_LEN]; hr = m_pImport->GetPinvokeMap(inToken, &flags, rcImport, ARRAY_SIZE(rcImport), 0, &tkModuleRef); if (FAILED(hr)) { if (hr != CLDB_E_RECORD_NOTFOUND) VWriteLine("ERROR: GetPinvokeMap failed.", hr); return; } WriteLine("\t\tPinvoke Map Data:"); VWriteLine("\t\tEntry point: %S", rcImport); VWriteLine("\t\tModule ref: %08x", tkModuleRef); sFlags[0] = 0; ISFLAG(Pm, NoMangle); ISFLAG(Pm, CharSetNotSpec); ISFLAG(Pm, CharSetAnsi); ISFLAG(Pm, CharSetUnicode); ISFLAG(Pm, CharSetAuto); ISFLAG(Pm, SupportsLastError); ISFLAG(Pm, CallConvWinapi); ISFLAG(Pm, CallConvCdecl); ISFLAG(Pm, CallConvStdcall); ISFLAG(Pm, CallConvThiscall); ISFLAG(Pm, CallConvFastcall); ISFLAG(Pm, BestFitEnabled); ISFLAG(Pm, BestFitDisabled); ISFLAG(Pm, BestFitUseAssem); ISFLAG(Pm, ThrowOnUnmappableCharEnabled); ISFLAG(Pm, ThrowOnUnmappableCharDisabled); ISFLAG(Pm, ThrowOnUnmappableCharUseAssem); if (!*sFlags) strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]"); VWriteLine("\t\tMapping flags: %s (%08x)", sFlags, flags); } // void MDInfo::DisplayPinvokeInfo() ///////////////////////////////////////////////////////////////////////// // void DisplaySignature(PCCOR_SIGNATURE pbSigBlob, ULONG ulSigBlob); // // Display COM+ signature -- taken from cordump.cpp's DumpSignature ///////////////////////////////////////////////////////////////////////// void MDInfo::DisplaySignature(PCCOR_SIGNATURE pbSigBlob, ULONG ulSigBlob, const char *preFix) { ULONG cbCur = 0; ULONG cb; // 428793: Prefix complained correctly about unitialized data. 
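    // A method signature blob is a stream of compressed integers: the calling
    // convention, an optional generic arity, the argument count, the return type,
    // and then one element type per argument.  For example, "int32 F(string)"
    // encodes roughly as 00 01 08 0E (DEFAULT, one arg, ELEMENT_TYPE_I4 return,
    // ELEMENT_TYPE_STRING argument).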
ULONG ulData = (ULONG) IMAGE_CEE_CS_CALLCONV_MAX; ULONG ulArgs; HRESULT hr = NOERROR; ULONG ulSigBlobStart = ulSigBlob; // initialize sigBuf InitSigBuffer(); cb = CorSigUncompressData(pbSigBlob, &ulData); VWriteLine("%s\t\tCallCnvntn: %s", preFix, (g_strCalling[ulData & IMAGE_CEE_CS_CALLCONV_MASK])); if (cb>ulSigBlob) goto ErrExit; cbCur += cb; ulSigBlob -= cb; if (ulData & IMAGE_CEE_CS_CALLCONV_HASTHIS) VWriteLine("%s\t\thasThis ", preFix); if (ulData & IMAGE_CEE_CS_CALLCONV_EXPLICITTHIS) VWriteLine("%s\t\texplicit ", preFix); if (ulData & IMAGE_CEE_CS_CALLCONV_GENERIC) VWriteLine("%s\t\tgeneric ", preFix); // initialize sigBuf InitSigBuffer(); if ( isCallConv(ulData,IMAGE_CEE_CS_CALLCONV_FIELD) ) { // display field type if (FAILED(hr = GetOneElementType(&pbSigBlob[cbCur], ulSigBlob, &cb))) goto ErrExit; VWriteLine("%s\t\tField type: %s", preFix, (LPSTR)m_sigBuf.Ptr()); if (cb>ulSigBlob) goto ErrExit; cbCur += cb; ulSigBlob -= cb; } else { if (ulData & IMAGE_CEE_CS_CALLCONV_GENERIC) { ULONG ulTyArgs; cb = CorSigUncompressData(&pbSigBlob[cbCur], &ulTyArgs); if (cb>ulSigBlob) goto ErrExit; cbCur += cb; ulSigBlob -= cb; VWriteLine("%s\t\tType Arity:%d ", preFix, ulTyArgs); } cb = CorSigUncompressData(&pbSigBlob[cbCur], &ulArgs); if (cb>ulSigBlob) goto ErrExit; cbCur += cb; ulSigBlob -= cb; if (ulData != IMAGE_CEE_CS_CALLCONV_LOCAL_SIG && ulData != IMAGE_CEE_CS_CALLCONV_GENERICINST) { // display return type when it is not a local varsig if (FAILED(hr = GetOneElementType(&pbSigBlob[cbCur], ulSigBlob, &cb))) goto ErrExit; VWriteLine("%s\t\tReturnType:%s", preFix, (LPSTR)m_sigBuf.Ptr()); if (cb>ulSigBlob) goto ErrExit; cbCur += cb; ulSigBlob -= cb; } // display count of argument // display arguments if (ulSigBlob) VWriteLine("%s\t\t%ld Arguments", preFix, ulArgs); else VWriteLine("%s\t\tNo arguments.", preFix); ULONG i = 0; while (i < ulArgs && ulSigBlob > 0) { ULONG ulDataTemp; // Handle the sentinal for varargs because it isn't counted in the args. CorSigUncompressData(&pbSigBlob[cbCur], &ulDataTemp); ++i; // initialize sigBuf InitSigBuffer(); if (FAILED(hr = GetOneElementType(&pbSigBlob[cbCur], ulSigBlob, &cb))) goto ErrExit; VWriteLine("%s\t\t\tArgument #%ld: %s",preFix, i, (LPSTR)m_sigBuf.Ptr()); if (cb>ulSigBlob) goto ErrExit; cbCur += cb; ulSigBlob -= cb; } } // Nothing consumed but not yet counted. cb = 0; ErrExit: // We should have consumed all signature blob. If not, dump the sig in hex. // Also dump in hex if so requested. if (m_DumpFilter & dumpMoreHex || ulSigBlob != 0) { // Did we not consume enough, or try to consume too much? if (cb > ulSigBlob) WriteLine("\tERROR IN SIGNATURE: Signature should be larger."); else if (cb < ulSigBlob) { VWrite("\tERROR IN SIGNATURE: Not all of signature blob was consumed. %d byte(s) remain", ulSigBlob); // If it is short, just append it to the end. if (ulSigBlob < 4) { Write(": "); for (; ulSigBlob; ++cbCur, --ulSigBlob) VWrite("%02x ", pbSigBlob[cbCur]); WriteLine(""); goto ErrExit2; } WriteLine(""); } // Any appropriate error message has been issued. Dump sig in hex, as determined // by error or command line switch. cbCur = 0; ulSigBlob = ulSigBlobStart; char rcNewPrefix[80]; sprintf_s(rcNewPrefix, 80, "%s\t\tSignature ", preFix); DumpHex(rcNewPrefix, pbSigBlob, ulSigBlob, false, 24); } ErrExit2: if (FAILED(hr)) Error("ERROR!! 
Bad signature blob value!"); return; } // void MDInfo::DisplaySignature() ///////////////////////////////////////////////////////////////////////// // HRESULT GetOneElementType(mdScope tkScope, BYTE *pbSigBlob, ULONG ulSigBlob, ULONG *pcb) // // Adds description of element type to the end of buffer -- caller must ensure // buffer is large enough. ///////////////////////////////////////////////////////////////////////// HRESULT MDInfo::GetOneElementType(PCCOR_SIGNATURE pbSigBlob, ULONG ulSigBlob, ULONG *pcb) { HRESULT hr = S_OK; // A result. ULONG cbCur = 0; ULONG cb; ULONG ulData = ELEMENT_TYPE_MAX; ULONG ulTemp; int iTemp = 0; mdToken tk; cb = CorSigUncompressData(pbSigBlob, &ulData); cbCur += cb; // Handle the modifiers. if (ulData & ELEMENT_TYPE_MODIFIER) { if (ulData == ELEMENT_TYPE_SENTINEL) IfFailGo(AddToSigBuffer("<ELEMENT_TYPE_SENTINEL>")); else if (ulData == ELEMENT_TYPE_PINNED) IfFailGo(AddToSigBuffer("PINNED")); else { hr = E_FAIL; goto ErrExit; } if (FAILED(GetOneElementType(&pbSigBlob[cbCur], ulSigBlob-cbCur, &cb))) goto ErrExit; cbCur += cb; goto ErrExit; } // Handle the underlying element types. if (ulData >= ELEMENT_TYPE_MAX) { hr = E_FAIL; goto ErrExit; } while (ulData == ELEMENT_TYPE_PTR || ulData == ELEMENT_TYPE_BYREF) { IfFailGo(AddToSigBuffer(" ")); IfFailGo(AddToSigBuffer(g_szMapElementType[ulData])); cb = CorSigUncompressData(&pbSigBlob[cbCur], &ulData); cbCur += cb; } IfFailGo(AddToSigBuffer(" ")); IfFailGo(AddToSigBuffer(g_szMapElementType[ulData])); if (CorIsPrimitiveType((CorElementType)ulData) || ulData == ELEMENT_TYPE_TYPEDBYREF || ulData == ELEMENT_TYPE_OBJECT || ulData == ELEMENT_TYPE_I || ulData == ELEMENT_TYPE_U) { // If this is a primitive type, we are done goto ErrExit; } if (ulData == ELEMENT_TYPE_VALUETYPE || ulData == ELEMENT_TYPE_CLASS || ulData == ELEMENT_TYPE_CMOD_REQD || ulData == ELEMENT_TYPE_CMOD_OPT) { cb = CorSigUncompressToken(&pbSigBlob[cbCur], &tk); cbCur += cb; // get the name of type ref. 
Don't care if truncated if (TypeFromToken(tk) == mdtTypeDef || TypeFromToken(tk) == mdtTypeRef) { sprintf_s(m_tempFormatBuffer, STRING_BUFFER_LEN, " %ls",TypeDeforRefName(tk, m_szTempBuf, ARRAY_SIZE(m_szTempBuf))); IfFailGo(AddToSigBuffer(m_tempFormatBuffer)); } else { _ASSERTE(TypeFromToken(tk) == mdtTypeSpec); sprintf_s(m_tempFormatBuffer, STRING_BUFFER_LEN, " %8x", tk); IfFailGo(AddToSigBuffer(m_tempFormatBuffer)); } if (ulData == ELEMENT_TYPE_CMOD_REQD || ulData == ELEMENT_TYPE_CMOD_OPT) { if (FAILED(GetOneElementType(&pbSigBlob[cbCur], ulSigBlob-cbCur, &cb))) goto ErrExit; cbCur += cb; } goto ErrExit; } if (ulData == ELEMENT_TYPE_SZARRAY) { // display the base type of SZARRAY if (FAILED(GetOneElementType(&pbSigBlob[cbCur], ulSigBlob-cbCur, &cb))) goto ErrExit; cbCur += cb; goto ErrExit; } // instantiated type if (ulData == ELEMENT_TYPE_GENERICINST) { // display the type constructor if (FAILED(GetOneElementType(&pbSigBlob[cbCur], ulSigBlob-cbCur, &cb))) goto ErrExit; cbCur += cb; ULONG numArgs; cb = CorSigUncompressData(&pbSigBlob[cbCur], &numArgs); cbCur += cb; IfFailGo(AddToSigBuffer("<")); while (numArgs > 0) { if (cbCur > ulSigBlob) goto ErrExit; if (FAILED(GetOneElementType(&pbSigBlob[cbCur], ulSigBlob-cbCur, &cb))) goto ErrExit; cbCur += cb; --numArgs; if (numArgs > 0) IfFailGo(AddToSigBuffer(",")); } IfFailGo(AddToSigBuffer(">")); goto ErrExit; } if (ulData == ELEMENT_TYPE_VAR) { ULONG index; cb = CorSigUncompressData(&pbSigBlob[cbCur], &index); cbCur += cb; sprintf_s(m_tempFormatBuffer, STRING_BUFFER_LEN, "!%d", index); IfFailGo(AddToSigBuffer(m_tempFormatBuffer)); goto ErrExit; } if (ulData == ELEMENT_TYPE_MVAR) { ULONG index; cb = CorSigUncompressData(&pbSigBlob[cbCur], &index); cbCur += cb; sprintf_s(m_tempFormatBuffer, STRING_BUFFER_LEN, "!!%d", index); IfFailGo(AddToSigBuffer(m_tempFormatBuffer)); goto ErrExit; } if (ulData == ELEMENT_TYPE_FNPTR) { cb = CorSigUncompressData(&pbSigBlob[cbCur], &ulData); cbCur += cb; if (ulData & IMAGE_CEE_CS_CALLCONV_EXPLICITTHIS) IfFailGo(AddToSigBuffer(" explicit")); if (ulData & IMAGE_CEE_CS_CALLCONV_HASTHIS) IfFailGo(AddToSigBuffer(" hasThis")); IfFailGo(AddToSigBuffer(" ")); IfFailGo(AddToSigBuffer(g_strCalling[ulData & IMAGE_CEE_CS_CALLCONV_MASK])); // Get number of args ULONG numArgs; cb = CorSigUncompressData(&pbSigBlob[cbCur], &numArgs); cbCur += cb; // do return type if (FAILED(GetOneElementType(&pbSigBlob[cbCur], ulSigBlob-cbCur, &cb))) goto ErrExit; cbCur += cb; IfFailGo(AddToSigBuffer("(")); while (numArgs > 0) { if (cbCur > ulSigBlob) goto ErrExit; if (FAILED(GetOneElementType(&pbSigBlob[cbCur], ulSigBlob-cbCur, &cb))) goto ErrExit; cbCur += cb; --numArgs; if (numArgs > 0) IfFailGo(AddToSigBuffer(",")); } IfFailGo(AddToSigBuffer(" )")); goto ErrExit; } if(ulData != ELEMENT_TYPE_ARRAY) return E_FAIL; // display the base type of SDARRAY if (FAILED(GetOneElementType(&pbSigBlob[cbCur], ulSigBlob-cbCur, &cb))) goto ErrExit; cbCur += cb; // display the rank of MDARRAY cb = CorSigUncompressData(&pbSigBlob[cbCur], &ulData); cbCur += cb; sprintf_s(m_tempFormatBuffer, STRING_BUFFER_LEN, " %d", ulData); IfFailGo(AddToSigBuffer(m_tempFormatBuffer)); if (ulData == 0) // we are done if no rank specified goto ErrExit; // how many dimensions have size specified? 
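    // (The ARRAY case follows the ArrayShape layout: rank, a count of sizes plus
    //  the sizes themselves, then a count of lower bounds plus the bounds, all
    //  encoded as compressed integers.)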
cb = CorSigUncompressData(&pbSigBlob[cbCur], &ulData); cbCur += cb; sprintf_s(m_tempFormatBuffer, STRING_BUFFER_LEN, " %d", ulData); IfFailGo(AddToSigBuffer(m_tempFormatBuffer)); while (ulData) { cb = CorSigUncompressData(&pbSigBlob[cbCur], &ulTemp); sprintf_s(m_tempFormatBuffer, STRING_BUFFER_LEN, " %d", ulTemp); IfFailGo(AddToSigBuffer(m_tempFormatBuffer)); cbCur += cb; ulData--; } // how many dimensions have lower bounds specified? cb = CorSigUncompressData(&pbSigBlob[cbCur], &ulData); cbCur += cb; sprintf_s(m_tempFormatBuffer, STRING_BUFFER_LEN, " %d", ulData); IfFailGo(AddToSigBuffer(m_tempFormatBuffer)); while (ulData) { cb = CorSigUncompressSignedInt(&pbSigBlob[cbCur], &iTemp); sprintf_s(m_tempFormatBuffer, STRING_BUFFER_LEN, " %d", iTemp); IfFailGo(AddToSigBuffer(m_tempFormatBuffer)); cbCur += cb; ulData--; } ErrExit: if (cbCur > ulSigBlob) hr = E_FAIL; *pcb = cbCur; return hr; } // HRESULT MDInfo::GetOneElementType() // Display the fields of the N/Direct custom value structure. void MDInfo::DisplayCorNativeLink(COR_NATIVE_LINK *pCorNLnk, const char *preFix) { // Print the LinkType. const char *curField = "\tLink Type : "; switch(pCorNLnk->m_linkType) { case nltNone: VWriteLine("%s%s%s(%02x)", preFix, curField, "nltNone", pCorNLnk->m_linkType); break; case nltAnsi: VWriteLine("%s%s%s(%02x)", preFix, curField, "nltAnsi", pCorNLnk->m_linkType); break; case nltUnicode: VWriteLine("%s%s%s(%02x)", preFix, curField, "nltUnicode", pCorNLnk->m_linkType); break; case nltAuto: VWriteLine("%s%s%s(%02x)", preFix, curField, "nltAuto", pCorNLnk->m_linkType); break; default: _ASSERTE(!"Invalid Native Link Type!"); } // Print the link flags curField = "\tLink Flags : "; switch(pCorNLnk->m_flags) { case nlfNone: VWriteLine("%s%s%s(%02x)", preFix, curField, "nlfNone", pCorNLnk->m_flags); break; case nlfLastError: VWriteLine("%s%s%s(%02x)", preFix, curField, "nlfLastError", pCorNLnk->m_flags); break; default: _ASSERTE(!"Invalid Native Link Flags!"); } // Print the entry point. 
WCHAR memRefName[STRING_BUFFER_LEN]; HRESULT hr; hr = m_pImport->GetMemberRefProps( pCorNLnk->m_entryPoint, NULL, memRefName, STRING_BUFFER_LEN, NULL, NULL, NULL); if (FAILED(hr)) Error("GetMemberRefProps failed.", hr); VWriteLine("%s\tEntry Point : %ls (0x%08x)", preFix, memRefName, pCorNLnk->m_entryPoint); } // void MDInfo::DisplayCorNativeLink() // Fills given varaint with value given in pValue and of type in bCPlusTypeFlag // // Taken from MetaInternal.cpp HRESULT _FillVariant( BYTE bCPlusTypeFlag, const void *pValue, ULONG cbValue, VARIANT *pvar) { HRESULT hr = NOERROR; switch (bCPlusTypeFlag) { case ELEMENT_TYPE_BOOLEAN: V_VT(pvar) = VT_BOOL; V_BOOL(pvar) = *((BYTE*)pValue); //*((UNALIGNED VARIANT_BOOL *)pValue); break; case ELEMENT_TYPE_I1: V_VT(pvar) = VT_I1; V_I1(pvar) = *((CHAR*)pValue); break; case ELEMENT_TYPE_U1: V_VT(pvar) = VT_UI1; V_UI1(pvar) = *((BYTE*)pValue); break; case ELEMENT_TYPE_I2: V_VT(pvar) = VT_I2; V_I2(pvar) = GET_UNALIGNED_VAL16(pValue); break; case ELEMENT_TYPE_U2: case ELEMENT_TYPE_CHAR: V_VT(pvar) = VT_UI2; V_UI2(pvar) = GET_UNALIGNED_VAL16(pValue); break; case ELEMENT_TYPE_I4: V_VT(pvar) = VT_I4; V_I4(pvar) = GET_UNALIGNED_VAL32(pValue); break; case ELEMENT_TYPE_U4: V_VT(pvar) = VT_UI4; V_UI4(pvar) = GET_UNALIGNED_VAL32(pValue); break; case ELEMENT_TYPE_R4: { V_VT(pvar) = VT_R4; __int32 Value = GET_UNALIGNED_VAL32(pValue); V_R4(pvar) = (float &)Value; } break; case ELEMENT_TYPE_R8: { V_VT(pvar) = VT_R8; __int64 Value = GET_UNALIGNED_VAL64(pValue); V_R8(pvar) = (double &) Value; } break; case ELEMENT_TYPE_STRING: { V_VT(pvar) = VT_BSTR; WCHAR *TempString;; #if BIGENDIAN TempString = (WCHAR *)alloca(cbValue); memcpy(TempString, pValue, cbValue); SwapStringLength(TempString, cbValue/sizeof(WCHAR)); #else TempString = (WCHAR *)pValue; #endif // allocated bstr here V_BSTR(pvar) = ::SysAllocStringLen((LPWSTR)TempString, cbValue/sizeof(WCHAR)); if (V_BSTR(pvar) == NULL) hr = E_OUTOFMEMORY; } break; case ELEMENT_TYPE_CLASS: V_VT(pvar) = VT_UNKNOWN; V_UNKNOWN(pvar) = NULL; // _ASSERTE( GET_UNALIGNED_VAL32(pValue) == 0); break; case ELEMENT_TYPE_I8: V_VT(pvar) = VT_I8; V_CY(pvar).int64 = GET_UNALIGNED_VAL64(pValue); break; case ELEMENT_TYPE_U8: V_VT(pvar) = VT_UI8; V_CY(pvar).int64 = GET_UNALIGNED_VAL64(pValue); break; case ELEMENT_TYPE_VOID: V_VT(pvar) = VT_EMPTY; break; default: _ASSERTE(!"bad constant value type!"); } return hr; } // HRESULT _FillVariant() void MDInfo::DisplayAssembly() { if (m_pAssemblyImport) { DisplayAssemblyInfo(); DisplayAssemblyRefs(); DisplayFiles(); DisplayExportedTypes(); DisplayManifestResources(); } } // void MDInfo::DisplayAssembly() void MDInfo::DisplayAssemblyInfo() { HRESULT hr; mdAssembly mda; const BYTE *pbPublicKey; ULONG cbPublicKey; ULONG ulHashAlgId; WCHAR szName[STRING_BUFFER_LEN]; ASSEMBLYMETADATA MetaData; DWORD dwFlags; hr = m_pAssemblyImport->GetAssemblyFromScope(&mda); if (hr == CLDB_E_RECORD_NOTFOUND) return; else if (FAILED(hr)) Error("GetAssemblyFromScope() failed.", hr); // Get the required sizes for the arrays of locales, processors etc. ZeroMemory(&MetaData, sizeof(ASSEMBLYMETADATA)); hr = m_pAssemblyImport->GetAssemblyProps(mda, NULL, NULL, // Public Key. NULL, // Hash Algorithm. NULL, 0, NULL, // Name. &MetaData, NULL); // Flags. if (FAILED(hr)) Error("GetAssemblyProps() failed.", hr); // Allocate space for the arrays in the ASSEMBLYMETADATA structure. 
if (MetaData.cbLocale) MetaData.szLocale = new WCHAR[MetaData.cbLocale]; if (MetaData.ulProcessor) MetaData.rProcessor = new DWORD[MetaData.ulProcessor]; if (MetaData.ulOS) MetaData.rOS = new OSINFO[MetaData.ulOS]; hr = m_pAssemblyImport->GetAssemblyProps(mda, (const void **)&pbPublicKey, &cbPublicKey, &ulHashAlgId, szName, STRING_BUFFER_LEN, NULL, &MetaData, &dwFlags); if (FAILED(hr)) Error("GetAssemblyProps() failed.", hr); WriteLine("Assembly"); WriteLine("-------------------------------------------------------"); VWriteLine("\tToken: 0x%08x", mda); VWriteLine("\tName : %ls", szName); DumpHex("\tPublic Key ", pbPublicKey, cbPublicKey, false, 24); VWriteLine("\tHash Algorithm : 0x%08x", ulHashAlgId); DisplayASSEMBLYMETADATA(&MetaData); if(MetaData.szLocale) delete [] MetaData.szLocale; if(MetaData.rProcessor) delete [] MetaData.rProcessor; if(MetaData.rOS) delete [] MetaData.rOS; char sFlags[STRING_BUFFER_LEN]; DWORD flags = dwFlags; sFlags[0] = 0; ISFLAG(Af, PublicKey); ISFLAG(Af, Retargetable); ISFLAG(AfContentType_, WindowsRuntime); if (!*sFlags) strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]"); VWriteLine("\tFlags : %s (%08x)", sFlags, dwFlags); DisplayCustomAttributes(mda, "\t"); DisplayPermissions(mda, "\t"); WriteLine(""); } // void MDInfo::DisplayAssemblyInfo() void MDInfo::DisplayAssemblyRefs() { HCORENUM assemblyRefEnum = NULL; mdAssemblyRef AssemblyRefs[ENUM_BUFFER_SIZE]; ULONG count; ULONG totalCount = 1; HRESULT hr; while (SUCCEEDED(hr = m_pAssemblyImport->EnumAssemblyRefs( &assemblyRefEnum, AssemblyRefs, ARRAY_SIZE(AssemblyRefs), &count)) && count > 0) { for (ULONG i = 0; i < count; i++, totalCount++) { VWriteLine("AssemblyRef #%d (%08x)", totalCount, AssemblyRefs[i]); WriteLine("-------------------------------------------------------"); DisplayAssemblyRefInfo(AssemblyRefs[i]); WriteLine(""); } } m_pAssemblyImport->CloseEnum(assemblyRefEnum); } // void MDInfo::DisplayAssemblyRefs() void MDInfo::DisplayAssemblyRefInfo(mdAssemblyRef inAssemblyRef) { HRESULT hr; const BYTE *pbPublicKeyOrToken; ULONG cbPublicKeyOrToken; WCHAR szName[STRING_BUFFER_LEN]; ASSEMBLYMETADATA MetaData; const BYTE *pbHashValue; ULONG cbHashValue; DWORD dwFlags; VWriteLine("\tToken: 0x%08x", inAssemblyRef); // Get sizes for the arrays in the ASSEMBLYMETADATA structure. ZeroMemory(&MetaData, sizeof(ASSEMBLYMETADATA)); hr = m_pAssemblyImport->GetAssemblyRefProps(inAssemblyRef, NULL, NULL, // Public Key or Token. NULL, 0, NULL, // Name. &MetaData, NULL, NULL, // HashValue. NULL); // Flags. if (FAILED(hr)) Error("GetAssemblyRefProps() failed.", hr); // Allocate space for the arrays in the ASSEMBLYMETADATA structure. 
if (MetaData.cbLocale) MetaData.szLocale = new WCHAR[MetaData.cbLocale]; if (MetaData.ulProcessor) MetaData.rProcessor = new DWORD[MetaData.ulProcessor]; if (MetaData.ulOS) MetaData.rOS = new OSINFO[MetaData.ulOS]; hr = m_pAssemblyImport->GetAssemblyRefProps(inAssemblyRef, (const void **)&pbPublicKeyOrToken, &cbPublicKeyOrToken, szName, STRING_BUFFER_LEN, NULL, &MetaData, (const void **)&pbHashValue, &cbHashValue, &dwFlags); if (FAILED(hr)) Error("GetAssemblyRefProps() failed.", hr); DumpHex("\tPublic Key or Token", pbPublicKeyOrToken, cbPublicKeyOrToken, false, 24); VWriteLine("\tName: %ls", szName); DisplayASSEMBLYMETADATA(&MetaData); if(MetaData.szLocale) delete [] MetaData.szLocale; if(MetaData.rProcessor) delete [] MetaData.rProcessor; if(MetaData.rOS) delete [] MetaData.rOS; DumpHex("\tHashValue Blob", pbHashValue, cbHashValue, false, 24); char sFlags[STRING_BUFFER_LEN]; DWORD flags = dwFlags; sFlags[0] = 0; ISFLAG(Af, PublicKey); ISFLAG(Af, Retargetable); ISFLAG(AfContentType_, WindowsRuntime); #if 0 ISFLAG(Af, LegacyLibrary); ISFLAG(Af, LegacyPlatform); ISFLAG(Af, Library); ISFLAG(Af, Platform); #endif if (!*sFlags) strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]"); VWriteLine("\tFlags: %s (%08x)", sFlags, dwFlags); DisplayCustomAttributes(inAssemblyRef, "\t"); WriteLine(""); } // void MDInfo::DisplayAssemblyRefInfo() void MDInfo::DisplayFiles() { HCORENUM fileEnum = NULL; mdFile Files[ENUM_BUFFER_SIZE]; ULONG count; ULONG totalCount = 1; HRESULT hr; while (SUCCEEDED(hr = m_pAssemblyImport->EnumFiles( &fileEnum, Files, ARRAY_SIZE(Files), &count)) && count > 0) { for (ULONG i = 0; i < count; i++, totalCount++) { VWriteLine("File #%d (%08x)", totalCount, Files[i]); WriteLine("-------------------------------------------------------"); DisplayFileInfo(Files[i]); WriteLine(""); } } m_pAssemblyImport->CloseEnum(fileEnum); } // void MDInfo::DisplayFiles() void MDInfo::DisplayFileInfo(mdFile inFile) { HRESULT hr; WCHAR szName[STRING_BUFFER_LEN]; const BYTE *pbHashValue; ULONG cbHashValue; DWORD dwFlags; VWriteLine("\tToken: 0x%08x", inFile); hr = m_pAssemblyImport->GetFileProps(inFile, szName, STRING_BUFFER_LEN, NULL, (const void **)&pbHashValue, &cbHashValue, &dwFlags); if (FAILED(hr)) Error("GetFileProps() failed.", hr); VWriteLine("\tName : %ls", szName); DumpHex("\tHashValue Blob ", pbHashValue, cbHashValue, false, 24); char sFlags[STRING_BUFFER_LEN]; DWORD flags = dwFlags; sFlags[0] = 0; ISFLAG(Ff, ContainsMetaData); ISFLAG(Ff, ContainsNoMetaData); if (!*sFlags) strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]"); VWriteLine("\tFlags : %s (%08x)", sFlags, dwFlags); DisplayCustomAttributes(inFile, "\t"); WriteLine(""); } // MDInfo::DisplayFileInfo() void MDInfo::DisplayExportedTypes() { HCORENUM comTypeEnum = NULL; mdExportedType ExportedTypes[ENUM_BUFFER_SIZE]; ULONG count; ULONG totalCount = 1; HRESULT hr; while (SUCCEEDED(hr = m_pAssemblyImport->EnumExportedTypes( &comTypeEnum, ExportedTypes, ARRAY_SIZE(ExportedTypes), &count)) && count > 0) { for (ULONG i = 0; i < count; i++, totalCount++) { VWriteLine("ExportedType #%d (%08x)", totalCount, ExportedTypes[i]); WriteLine("-------------------------------------------------------"); DisplayExportedTypeInfo(ExportedTypes[i]); WriteLine(""); } } m_pAssemblyImport->CloseEnum(comTypeEnum); } // void MDInfo::DisplayExportedTypes() void MDInfo::DisplayExportedTypeInfo(mdExportedType inExportedType) { HRESULT hr; WCHAR szName[STRING_BUFFER_LEN]; mdToken tkImplementation; mdTypeDef tkTypeDef; DWORD dwFlags; char sFlags[STRING_BUFFER_LEN]; 
VWriteLine("\tToken: 0x%08x", inExportedType); hr = m_pAssemblyImport->GetExportedTypeProps(inExportedType, szName, STRING_BUFFER_LEN, NULL, &tkImplementation, &tkTypeDef, &dwFlags); if (FAILED(hr)) Error("GetExportedTypeProps() failed.", hr); VWriteLine("\tName: %ls", szName); VWriteLine("\tImplementation token: 0x%08x", tkImplementation); VWriteLine("\tTypeDef token: 0x%08x", tkTypeDef); VWriteLine("\tFlags : %s (%08x)",ClassFlags(dwFlags, sFlags), dwFlags); DisplayCustomAttributes(inExportedType, "\t"); WriteLine(""); } // void MDInfo::DisplayExportedTypeInfo() void MDInfo::DisplayManifestResources() { HCORENUM manifestResourceEnum = NULL; mdManifestResource ManifestResources[ENUM_BUFFER_SIZE]; ULONG count; ULONG totalCount = 1; HRESULT hr; while (SUCCEEDED(hr = m_pAssemblyImport->EnumManifestResources( &manifestResourceEnum, ManifestResources, ARRAY_SIZE(ManifestResources), &count)) && count > 0) { for (ULONG i = 0; i < count; i++, totalCount++) { VWriteLine("ManifestResource #%d (%08x)", totalCount, ManifestResources[i]); WriteLine("-------------------------------------------------------"); DisplayManifestResourceInfo(ManifestResources[i]); WriteLine(""); } } m_pAssemblyImport->CloseEnum(manifestResourceEnum); } // void MDInfo::DisplayManifestResources() void MDInfo::DisplayManifestResourceInfo(mdManifestResource inManifestResource) { HRESULT hr; WCHAR szName[STRING_BUFFER_LEN]; mdToken tkImplementation; DWORD dwOffset; DWORD dwFlags; VWriteLine("\tToken: 0x%08x", inManifestResource); hr = m_pAssemblyImport->GetManifestResourceProps(inManifestResource, szName, STRING_BUFFER_LEN, NULL, &tkImplementation, &dwOffset, &dwFlags); if (FAILED(hr)) Error("GetManifestResourceProps() failed.", hr); VWriteLine("Name: %ls", szName); VWriteLine("Implementation token: 0x%08x", tkImplementation); VWriteLine("Offset: 0x%08x", dwOffset); char sFlags[STRING_BUFFER_LEN]; DWORD flags = dwFlags; sFlags[0] = 0; ISFLAG(Mr, Public); ISFLAG(Mr, Private); if (!*sFlags) strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]"); VWriteLine("\tFlags: %s (%08x)", sFlags, dwFlags); DisplayCustomAttributes(inManifestResource, "\t"); WriteLine(""); } // void MDInfo::DisplayManifestResourceInfo() void MDInfo::DisplayASSEMBLYMETADATA(ASSEMBLYMETADATA *pMetaData) { ULONG i; VWriteLine("\tVersion: %d.%d.%d.%d", pMetaData->usMajorVersion, pMetaData->usMinorVersion, pMetaData->usBuildNumber, pMetaData->usRevisionNumber); VWriteLine("\tMajor Version: 0x%08x", pMetaData->usMajorVersion); VWriteLine("\tMinor Version: 0x%08x", pMetaData->usMinorVersion); VWriteLine("\tBuild Number: 0x%08x", pMetaData->usBuildNumber); VWriteLine("\tRevision Number: 0x%08x", pMetaData->usRevisionNumber); VWriteLine("\tLocale: %ls", pMetaData->cbLocale ? pMetaData->szLocale : W("<null>")); for (i = 0; i < pMetaData->ulProcessor; i++) VWriteLine("\tProcessor #%ld: 0x%08x", i+1, pMetaData->rProcessor[i]); for (i = 0; i < pMetaData->ulOS; i++) { VWriteLine("\tOS #%ld:", i+1); VWriteLine("\t\tOS Platform ID: 0x%08x", pMetaData->rOS[i].dwOSPlatformId); VWriteLine("\t\tOS Major Version: 0x%08x", pMetaData->rOS[i].dwOSMajorVersion); VWriteLine("\t\tOS Minor Version: 0x%08x", pMetaData->rOS[i].dwOSMinorVersion); } } // void MDInfo::DisplayASSEMBLYMETADATA() void MDInfo::DisplayUserStrings() { HCORENUM stringEnum = NULL; // string enumerator. mdString Strings[ENUM_BUFFER_SIZE]; // String tokens from enumerator. CQuickArray<WCHAR> rUserString; // Buffer to receive string. WCHAR *szUserString; // Working pointer into buffer. ULONG chUserString; // Size of user string. 
CQuickArray<char> rcBuf; // Buffer to hold the BLOB version of the string. char *szBuf; // Working pointer into buffer. ULONG chBuf; // Saved size of the user string. ULONG count; // Items returned from enumerator. ULONG totalCount = 1; // Running count of strings. bool bUnprint = false; // Is an unprintable character found? HRESULT hr; // A result. while (SUCCEEDED(hr = m_pImport->EnumUserStrings( &stringEnum, Strings, ARRAY_SIZE(Strings), &count)) && count > 0) { if (totalCount == 1) { // If only one, it is the NULL string, so don't print it. WriteLine("User Strings"); WriteLine("-------------------------------------------------------"); } for (ULONG i = 0; i < count; i++, totalCount++) { do { // Try to get the string into the existing buffer. hr = m_pImport->GetUserString( Strings[i], rUserString.Ptr(),(ULONG32)rUserString.MaxSize(), &chUserString); if (hr == CLDB_S_TRUNCATION) { // Buffer wasn't big enough, try to enlarge it. if (FAILED(rUserString.ReSizeNoThrow(chUserString))) Error("malloc failed.", E_OUTOFMEMORY); continue; } } while (hr == CLDB_S_TRUNCATION); if (FAILED(hr)) Error("GetUserString failed.", hr); szUserString = rUserString.Ptr(); chBuf = chUserString; VWrite("%08x : (%2d) L\"", Strings[i], chUserString); for (ULONG j=0; j<chUserString; j++) { switch (*szUserString) { case 0: Write("\\0"); break; case L'\r': Write("\\r"); break; case L'\n': Write("\\n"); break; case L'\t': Write("\\t"); break; default: if (iswprint(*szUserString)) VWrite("%lc", *szUserString); else { bUnprint = true; Write("."); } break; } ++szUserString; if((j>0)&&((j&0x7F)==0)) WriteLine(""); } WriteLine("\""); // Print the user string as a blob if an unprintable character is found. if (bUnprint) { bUnprint = false; szUserString = rUserString.Ptr(); if (FAILED(hr = rcBuf.ReSizeNoThrow(81))) //(chBuf * 5 + 1); Error("ReSize failed.", hr); szBuf = rcBuf.Ptr(); ULONG j,k; WriteLine("\t\tUser string has unprintables, hex format below:"); for (j = 0,k=0; j < chBuf; j++) { sprintf_s (&szBuf[k*5], 81, "%04x ", szUserString[j]); k++; if((k==16)||(j == (chBuf-1))) { szBuf[k*5] = '\0'; VWriteLine("\t\t%s", szBuf); k=0; } } } } } if (stringEnum) m_pImport->CloseEnum(stringEnum); } // void MDInfo::DisplayUserStrings() void MDInfo::DisplayUnsatInfo() { HRESULT hr = S_OK; HCORENUM henum = 0; mdToken tk; ULONG cMethods; Write("\nUnresolved Externals\n"); Write("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n"); while ( (hr = m_pImport->EnumUnresolvedMethods( &henum, &tk, 1, &cMethods)) == S_OK && cMethods ) { if ( TypeFromToken(tk) == mdtMethodDef ) { // a method definition without implementation DisplayMethodInfo( tk ); } else if ( TypeFromToken(tk) == mdtMemberRef ) { // an unresolved MemberRef to a global function DisplayMemberRefInfo( tk, "" ); } else { _ASSERTE(!"Unknown token kind!"); } } m_pImport->CloseEnum(henum); } // void MDInfo::DisplayUnsatInfo() //******************************************************************************* // This code is used for debugging purposes only. This will just print out the // entire database. //******************************************************************************* const char *MDInfo::DumpRawNameOfType(ULONG iType) { if (iType <= iRidMax) { const char *pNameTable; m_pTables->GetTableInfo(iType, 0,0,0,0, &pNameTable); return pNameTable; } else // Is the field a coded token? 
if (iType <= iCodedTokenMax) { int iCdTkn = iType - iCodedToken; const char *pNameCdTkn; m_pTables->GetCodedTokenInfo(iCdTkn, 0,0, &pNameCdTkn); return pNameCdTkn; } // Fixed type. switch (iType) { case iBYTE: return "BYTE"; case iSHORT: return "short"; case iUSHORT: return "USHORT"; case iLONG: return "long"; case iULONG: return "ULONG"; case iSTRING: return "string"; case iGUID: return "GUID"; case iBLOB: return "blob"; } // default: static char buf[30]; sprintf_s(buf, 30, "unknown type 0x%02x", iType); return buf; } // const char *MDInfo::DumpRawNameOfType() void MDInfo::DumpRawCol(ULONG ixTbl, ULONG ixCol, ULONG rid, bool bStats) { ULONG ulType; // Type of a column. ULONG ulVal; // Value of a column. LPCUTF8 pString; // Pointer to a string. const void *pBlob; // Pointer to a blob. ULONG cb; // Size of something. m_pTables->GetColumn(ixTbl, ixCol, rid, &ulVal); m_pTables->GetColumnInfo(ixTbl, ixCol, 0, 0, &ulType, 0); if (ulType <= iRidMax) { const char *pNameTable; m_pTables->GetTableInfo(ulType, 0,0,0,0, &pNameTable); VWrite("%s[%x]", pNameTable, ulVal); } else // Is the field a coded token? if (ulType <= iCodedTokenMax) { int iCdTkn = ulType - iCodedToken; const char *pNameCdTkn; m_pTables->GetCodedTokenInfo(iCdTkn, 0,0, &pNameCdTkn); VWrite("%s[%08x]", pNameCdTkn, ulVal); } else { // Fixed type. switch (ulType) { case iBYTE: VWrite("%02x", ulVal); break; case iSHORT: case iUSHORT: VWrite("%04x", ulVal); break; case iLONG: case iULONG: VWrite("%08x", ulVal); break; case iSTRING: if (ulVal && (m_DumpFilter & dumpNames)) { m_pTables->GetString(ulVal, &pString); VWrite("(%x)\"%s\"", ulVal, pString); } else VWrite("string#%x", ulVal); if (bStats && ulVal) { m_pTables->GetString(ulVal, &pString); cb = (ULONG) strlen(pString) + 1; VWrite("(%d)", cb); } break; case iGUID: VWrite("guid#%x", ulVal); if (bStats && ulVal) { VWrite("(16)"); } break; case iBLOB: VWrite("blob#%x", ulVal); if (bStats && ulVal) { m_pTables->GetBlob(ulVal, &cb, &pBlob); cb += 1; if (cb > 128) cb += 1; if (cb > 16535) cb += 1; VWrite("(%d)", cb); } break; default: VWrite("unknown type 0x%04x", ulVal); break; } } } // void MDInfo::DumpRawCol() ULONG MDInfo::DumpRawColStats(ULONG ixTbl, ULONG ixCol, ULONG cRows) { ULONG rslt = 0; ULONG ulType; // Type of a column. ULONG ulVal; // Value of a column. LPCUTF8 pString; // Pointer to a string. const void *pBlob; // Pointer to a blob. ULONG cb; // Size of something. m_pTables->GetColumnInfo(ixTbl, ixCol, 0, 0, &ulType, 0); if (IsHeapType(ulType)) { for (ULONG rid=1; rid<=cRows; ++rid) { m_pTables->GetColumn(ixTbl, ixCol, rid, &ulVal); // Fixed type. switch (ulType) { case iSTRING: if (ulVal) { m_pTables->GetString(ulVal, &pString); cb = (ULONG) strlen(pString); rslt += cb + 1; } break; case iGUID: if (ulVal) rslt += 16; break; case iBLOB: if (ulVal) { m_pTables->GetBlob(ulVal, &cb, &pBlob); rslt += cb + 1; if (cb > 128) rslt += 1; if (cb > 16535) rslt += 1; } break; default: break; } } } return rslt; } // ULONG MDInfo::DumpRawColStats() int MDInfo::DumpHex( const char *szPrefix, // String prefix for first line. const void *pvData, // The data to print. ULONG cbData, // Bytes of data to print. int bText, // If true, also dump text. ULONG nLine) // Bytes per line to print. { const BYTE *pbData = static_cast<const BYTE*>(pvData); ULONG i; // Loop control. ULONG nPrint; // Number to print in an iteration. ULONG nSpace; // Spacing calculations. ULONG nPrefix; // Size of the prefix. ULONG nLines=0; // Number of lines printed. 
const char *pPrefix; // For counting spaces in the prefix. // Round down to 8 characters. nLine = nLine & ~0x7; for (nPrefix=0, pPrefix=szPrefix; *pPrefix; ++pPrefix) { if (*pPrefix == '\t') nPrefix = (nPrefix + 8) & ~7; else ++nPrefix; } //nPrefix = strlen(szPrefix); do { // Write the line prefix. if (szPrefix) VWrite("%s:", szPrefix); else VWrite("%*s:", nPrefix, ""); szPrefix = 0; ++nLines; // Calculate spacing. nPrint = min(cbData, nLine); nSpace = nLine - nPrint; // dump in hex. for(i=0; i<nPrint; i++) { if ((i&7) == 0) Write(" "); VWrite("%02x ", pbData[i]); } if (bText) { // Space out to the text spot. if (nSpace) VWrite("%*s", nSpace*3+nSpace/8, ""); // Dump in text. Write(">"); for(i=0; i<nPrint; i++) VWrite("%c", (isprint(pbData[i])) ? pbData[i] : ' '); // Space out the text, and finish the line. VWrite("%*s<", nSpace, ""); } VWriteLine(""); // Next data to print. cbData -= nPrint; pbData += nPrint; } while (cbData > 0); return nLines; } // int MDInfo::DumpHex() void MDInfo::DumpRawHeaps() { HRESULT hr; // A result. ULONG ulSize; // Bytes in a heap. const BYTE *pData; // Pointer to a blob. ULONG cbData; // Size of a blob. ULONG oData; // Offset of current blob. char rcPrefix[30]; // To format line prefix. m_pTables->GetBlobHeapSize(&ulSize); VWriteLine(""); VWriteLine("Blob Heap: %d(%#x) bytes", ulSize,ulSize); oData = 0; do { m_pTables->GetBlob(oData, &cbData, (const void**)&pData); sprintf_s(rcPrefix, 30, "%5x,%-2x", oData, cbData); DumpHex(rcPrefix, pData, cbData); hr = m_pTables->GetNextBlob(oData, &oData); } while (hr == S_OK); m_pTables->GetStringHeapSize(&ulSize); VWriteLine(""); VWriteLine("String Heap: %d(%#x) bytes", ulSize,ulSize); oData = 0; const char *pString; do { m_pTables->GetString(oData, &pString); if (m_DumpFilter & dumpMoreHex) { sprintf_s(rcPrefix, 30, "%08x", oData); DumpHex(rcPrefix, pString, (ULONG)strlen(pString)+1); } else if (*pString != 0) VWrite("%08x: %s\n", oData, pString); hr = m_pTables->GetNextString(oData, &oData); } while (hr == S_OK); VWriteLine(""); DisplayUserStrings(); } // void MDInfo::DumpRawHeaps() void MDInfo::DumpRaw(int iDump, bool bunused) { ULONG cTables; // Tables in the database. ULONG cCols; // Columns in a table. ULONG cRows; // Rows in a table. ULONG cbRow; // Bytes in a row of a table. ULONG iKey; // Key column of a table. const char *pNameTable; // Name of a table. ULONG oCol; // Offset of a column. ULONG cbCol; // Size of a column. ULONG ulType; // Type of a column. const char *pNameColumn; // Name of a column. ULONG ulSize; // Heaps is easy -- there is a specific bit for that. bool bStats = (m_DumpFilter & dumpStats) != 0; // Rows are harder. Was there something else that limited data? BOOL bRows = (m_DumpFilter & (dumpSchema | dumpHeader)) == 0; BOOL bSchema = bRows || (m_DumpFilter & dumpSchema); // (m_DumpFilter & (dumpSchema | dumpHeader | dumpCSV | dumpRaw | dumpStats | dumpRawHeaps)) if (m_pTables2) { // Get the raw metadata header. const BYTE *pbData = NULL; const BYTE *pbStream = NULL; // One of the stream.s const BYTE *pbMd = NULL; // The metadata stream. ULONG cbData = 0; ULONG cbStream = 0; // One of the streams. ULONG cbMd = 0; // The metadata stream. const char *pName; HRESULT hr = S_OK; ULONG ix; m_pTables2->GetMetaDataStorage((const void**)&pbData, &cbData); // Per the ECMA spec, the section data looks like this: struct MDSTORAGESIGNATURE { ULONG lSignature; // "Magic" signature. USHORT iMajorVer; // Major file version. USHORT iMinorVer; // Minor file version. 
ULONG iExtraData; // Offset to next structure of information ULONG iVersionString; // Length of version string BYTE pVersion[0]; // Version string }; struct MDSTORAGEHEADER { BYTE fFlags; // STGHDR_xxx flags. BYTE pad; USHORT iStreams; // How many streams are there. }; const MDSTORAGESIGNATURE *pStorage = (const MDSTORAGESIGNATURE *) pbData; const MDSTORAGEHEADER *pSHeader = (const MDSTORAGEHEADER *)(pbData + sizeof(MDSTORAGESIGNATURE) + pStorage->iVersionString); VWriteLine("Metadata section: 0x%08x, version: %d.%d, extra: %d, version len: %d, version: %s", pStorage->lSignature, pStorage->iMajorVer, pStorage->iMinorVer, pStorage->iExtraData, pStorage->iVersionString, pStorage->pVersion); VWriteLine(" flags: 0x%02x, streams: %d", pSHeader->fFlags, pSHeader->iStreams); if (m_DumpFilter & dumpMoreHex) { const BYTE *pbEnd = pbData; ULONG cb = sizeof(MDSTORAGESIGNATURE) + pStorage->iVersionString + sizeof(MDSTORAGEHEADER); hr = m_pTables2->GetMetaDataStreamInfo(0, &pName, (const void**)&pbEnd, &cbStream); if (hr == S_OK) cb = (ULONG)(pbEnd - pbData); DumpHex(" ", pbData, cb); } for (ix=0; hr == S_OK; ++ix) { hr = m_pTables2->GetMetaDataStreamInfo(ix, &pName, (const void**)&pbStream, &cbStream); if (hr != S_OK) break; if (strcmp(pName, "#~") == 0 || strcmp(pName, "#-") == 0) { pbMd = pbStream; cbMd = cbStream; } VWriteLine("Stream %d: name: %s, size %d", ix, pName, cbStream); // hex for individual stream headers in metadata section dump. hex for // the streams themselves distributed throughout the dump. } if (pbMd) { // Per ECMA, the metadata header looks like this: struct MD { ULONG m_ulReserved; // Reserved, must be zero. BYTE m_major; // Version numbers. BYTE m_minor; BYTE m_heaps; // Bits for heap sizes. BYTE m_rid; // log-base-2 of largest rid. unsigned __int64 m_maskvalid; // Bit mask of present table counts. unsigned __int64 m_sorted; // Bit mask of sorted tables. }; }; const MD *pMd; pMd = (const MD *)pbMd; VWriteLine("Metadata header: %d.%d, heaps: 0x%02x, rid: 0x%02x, valid: 0x%016I64x, sorted: 0x%016I64x", pMd->m_major, pMd->m_minor, pMd->m_heaps, pMd->m_rid, (ULONGLONG)GET_UNALIGNED_VAL64(&(pMd->m_maskvalid)), (ULONGLONG)GET_UNALIGNED_VAL64(&(pMd->m_sorted))); if (m_DumpFilter & dumpMoreHex) { DumpHex(" ", pbMd, sizeof(MD)); } } VWriteLine(""); } m_pTables->GetNumTables(&cTables); m_pTables->GetStringHeapSize(&ulSize); VWrite("Strings: %d(%#x)", ulSize, ulSize); m_pTables->GetBlobHeapSize(&ulSize); VWrite(", Blobs: %d(%#x)", ulSize, ulSize); m_pTables->GetGuidHeapSize(&ulSize); VWrite(", Guids: %d(%#x)", ulSize, ulSize); m_pTables->GetUserStringHeapSize(&ulSize); VWriteLine(", User strings: %d(%#x)", ulSize, ulSize); for (ULONG ixTbl = 0; ixTbl < cTables; ++ixTbl) { m_pTables->GetTableInfo(ixTbl, &cbRow, &cRows, &cCols, &iKey, &pNameTable); if (bRows) // when dumping rows, print a break between row data and schema VWriteLine("================================================="); VWriteLine("%2d(%#x): %-20s cRecs:%5d(%#x), cbRec:%3d(%#x), cbTable:%6d(%#x)", ixTbl, ixTbl, pNameTable, cRows, cRows, cbRow, cbRow, cbRow * cRows, cbRow * cRows); if (!bSchema && !bRows) continue; // Dump column definitions for the table. 
ULONG ixCol; for (ixCol=0; ixCol<cCols; ++ixCol) { m_pTables->GetColumnInfo(ixTbl, ixCol, &oCol, &cbCol, &ulType, &pNameColumn); VWrite(" col %2x:%c %-12s oCol:%2x, cbCol:%x, %-7s", ixCol, ((ixCol==iKey)?'*':' '), pNameColumn, oCol, cbCol, DumpRawNameOfType(ulType)); if (bStats) { ulSize = DumpRawColStats(ixTbl, ixCol, cRows); if (ulSize) VWrite("(%d)", ulSize); } VWriteLine(""); } if (!bRows) continue; // Dump the rows. for (ULONG rid = 1; rid <= cRows; ++rid) { if (rid == 1) VWriteLine("-------------------------------------------------"); VWrite(" %3x == ", rid); for (ixCol=0; ixCol < cCols; ++ixCol) { if (ixCol) VWrite(", "); VWrite("%d:", ixCol); DumpRawCol(ixTbl, ixCol, rid, bStats); } VWriteLine(""); } } } // void MDInfo::DumpRaw() void MDInfo::DumpRawCSV() { ULONG cTables; // Tables in the database. ULONG cCols; // Columns in a table. ULONG cRows; // Rows in a table. ULONG cbRow; // Bytes in a row of a table. const char *pNameTable; // Name of a table. ULONG ulSize; m_pTables->GetNumTables(&cTables); VWriteLine("Name,Size,cRecs,cbRec"); m_pTables->GetStringHeapSize(&ulSize); VWriteLine("Strings,%d", ulSize); m_pTables->GetBlobHeapSize(&ulSize); VWriteLine("Blobs,%d", ulSize); m_pTables->GetGuidHeapSize(&ulSize); VWriteLine("Guids,%d", ulSize); for (ULONG ixTbl = 0; ixTbl < cTables; ++ixTbl) { m_pTables->GetTableInfo(ixTbl, &cbRow, &cRows, &cCols, NULL, &pNameTable); VWriteLine("%s,%d,%d,%d", pNameTable, cbRow*cRows, cRows, cbRow); } } // void MDInfo::DumpRawCSV()
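The MDInfo dump code above repeatedly walks signature blobs with CorSigUncompressData, advancing cbCur by the number of bytes each compressed value occupies. As a minimal standalone sketch (not the real CorSigUncompressData, just the ECMA-335 II.23.2 compressed-unsigned-integer encoding it implements), the decoding logic looks roughly like this; the function and variable names here are made up for illustration.

// Sketch of ECMA-335 compressed unsigned integer decoding: returns the
// number of bytes consumed, mirroring how GetOneElementType advances cbCur.
#include <cstdint>
#include <cstdio>

static unsigned DecodeCompressedUInt(const uint8_t* p, uint32_t* value)
{
    if ((p[0] & 0x80) == 0x00)        // 1-byte form: 0xxxxxxx
    {
        *value = p[0];
        return 1;
    }
    if ((p[0] & 0xC0) == 0x80)        // 2-byte form: 10xxxxxx xxxxxxxx
    {
        *value = ((p[0] & 0x3Fu) << 8) | p[1];
        return 2;
    }
    // 4-byte form: 110xxxxx xxxxxxxx xxxxxxxx xxxxxxxx
    *value = ((p[0] & 0x1Fu) << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
    return 4;
}

int main()
{
    const uint8_t sig[] = { 0x03, 0x8F, 0xFF };   // encodes 3, then 0x0FFF
    uint32_t v;
    unsigned cb = DecodeCompressedUInt(sig, &v);
    printf("first value %u (%u byte(s))\n", v, cb);
    cb += DecodeCompressedUInt(sig + cb, &v);
    printf("second value %u (consumed %u bytes total)\n", v, cb);
    return 0;
}

The same cursor-plus-consumed-bytes pattern is what lets GetOneElementType recurse through pointers, arrays, and generic instantiations without ever re-scanning the blob.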
-1
dotnet/runtime
66,474
Update repo CMake configuration to target /W4 by default
Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
AaronRobinsonMSFT
2022-03-10T20:20:31Z
2022-03-11T04:59:21Z
ba5207b0f4054fe8963fc3e87c2bd32907516961
5ce2b9f860f7a84c3059650bb67817d59d8f4953
Update repo CMake configuration to target /W4 by default. Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
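For illustration of what "disable all failing warnings" means in practice under /W4, the snippet below is a hypothetical example (not code from this PR): MSVC's level-4 warning C4100 flags unreferenced formal parameters that /W3 accepts silently, and the usual fix is to drop or comment out the parameter name.

// Illustrative only: clean at /W3, trips C4100 under /W4.
// Compile with: cl /W4 /c w4demo.cpp
#include <cstdio>

static void LogMessage(const char* message, void* context)   // 'context' unused -> C4100
{
    printf("%s\n", message);
}

static void LogMessageFixed(const char* message, void* /*context*/)   // common fix
{
    printf("%s\n", message);
}

int main()
{
    LogMessage("hello", nullptr);
    LogMessageFixed("hello", nullptr);
    return 0;
}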
./src/libraries/System.Private.Xml/tests/Xslt/TestFiles/TestData/xsltc/baseline/bft33.txt
fatal error : Unrecognized option: '/foo'.
fatal error : Unrecognized option: '/foo'.
-1
dotnet/runtime
66,474
Update repo CMake configuration to target /W4 by default
Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
AaronRobinsonMSFT
2022-03-10T20:20:31Z
2022-03-11T04:59:21Z
ba5207b0f4054fe8963fc3e87c2bd32907516961
5ce2b9f860f7a84c3059650bb67817d59d8f4953
Update repo CMake configuration to target /W4 by default. Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
./src/coreclr/pal/src/misc/dbgmsg.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*++ Module Name: misc/dbgmsg.cpp Abstract: Implementation of Debug Message utilies. Relay channel information, output functions, etc. --*/ /* PAL headers */ #include "pal/thread.hpp" #include "pal/malloc.hpp" #include "pal/file.hpp" #include "config.h" #include "pal/dbgmsg.h" #include "pal/cruntime.h" #include "pal/critsect.h" #include "pal/file.h" #include "pal/environ.h" /* standard headers */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <pthread.h> /* for pthread_self */ #include <errno.h> #include <dirent.h> #include <dlfcn.h> /* <stdarg.h> needs to be included after "palinternal.h" to avoid name collision for va_start and va_end */ #include <stdarg.h> using namespace CorUnix; /* append mode file I/O is safer */ #define _PAL_APPEND_DBG_OUTPUT_ static const char FOPEN_FLAGS[] = "at"; /* number of ENTRY nesting levels to indicate with a '.' */ #define MAX_NESTING 50 /* size of output buffer (arbitrary) */ #define DBG_BUFFER_SIZE 20000 /* global and static variables */ LPCWSTR W16_NULLSTRING = (LPCWSTR) "N\0U\0L\0L\0\0"; DWORD dbg_channel_flags[DCI_LAST]; BOOL g_Dbg_asserts_enabled; /* we must use stdio functions directly rather that rely on PAL functions for output, because those functions do tracing and we need to avoid recursion */ FILE *output_file = NULL; /* master switch for debug channel enablement, to be modified by debugger */ Volatile<BOOL> dbg_master_switch = TRUE; static const char *dbg_channel_names[]= { "PAL", "LOADER", "HANDLE", "SHMEM", "PROCESS", "THREAD", "EXCEPT", "CRT", "UNICODE", "ARCH", "SYNC", "FILE", "VIRTUAL", "MEM", "SOCKET", "DEBUG", "LOCALE", "MISC", "MUTEX", "CRITSEC", "POLL", "CRYPT", "SHFOLDER" , "SXS" , "DCI_NUMA" }; // Verify the number of elements in dbg_channel_names static_assert_no_msg(ARRAY_SIZE(dbg_channel_names) == DCI_LAST); static const char *dbg_level_names[]= { "ENTRY", "TRACE", "WARN", "ERROR", "ASSERT", "EXIT" }; static const char ENV_FILE[]="PAL_API_TRACING"; static const char ENV_CHANNELS[]="PAL_DBG_CHANNELS"; static const char ENV_ASSERTS[]="PAL_DISABLE_ASSERTS"; static const char ENV_ENTRY_LEVELS[]="PAL_API_LEVELS"; /* per-thread storage for ENTRY tracing level */ static pthread_key_t entry_level_key; /* entry level limitation */ static int max_entry_level; /* character to use for ENTRY indentation */ static const char INDENT_CHAR = '.'; static BOOL DBG_get_indent(DBG_LEVEL_ID level, const char *format, char *indent_string); static CRITICAL_SECTION fprintf_crit_section; /* Function definitions */ /*++ Function : DBG_init_channels Parse environment variables PAL_DBG_CHANNELS and PAL_API_TRACING for debug channel settings; initialize static variables. 
(no parameters, no return value) --*/ BOOL DBG_init_channels(void) { INT i; LPSTR env_string; LPSTR env_workstring; LPSTR env_pcache; LPSTR entry_ptr; LPSTR level_ptr; CHAR plus_or_minus; DWORD flag_mask = 0; int ret; /* output only asserts by default [only affects no-vararg-support case; if we have varargs, these flags aren't even checked for ASSERTs] */ for(i=0;i<DCI_LAST;i++) dbg_channel_flags[i]=1<<DLI_ASSERT; /* parse PAL_DBG_CHANNELS environment variable */ env_string = getenv(ENV_CHANNELS); if (env_string != NULL) { env_string = strdup(env_string); } env_pcache = env_workstring = env_string; while(env_workstring) { entry_ptr=env_workstring; /* find beginning of next entry */ while((*entry_ptr != '\0') &&(*entry_ptr != '+') && (*entry_ptr != '-')) { entry_ptr++; } /* break if end of string is reached */ if(*entry_ptr == '\0') { break; } plus_or_minus=*entry_ptr++; /* find end of entry; if strchr returns NULL, we have reached the end of the string and we will leave the loop at the end of this pass. */ env_workstring=strchr(entry_ptr,':'); /* NULL-terminate entry, make env_string point to rest of string */ if(env_workstring) { *env_workstring++='\0'; } /* find period that separates channel name from level name */ level_ptr=strchr(entry_ptr,'.'); /* an entry with no period is illegal : ignore it */ if(!level_ptr) { continue; } /* NULL-terminate channel name, make level_ptr point to the level name */ *level_ptr++='\0'; /* build the flag mask based on requested level */ /* if "all" level is specified, we want to open/close all levels at once, so mask is either all ones or all zeroes */ if(!strcmp(level_ptr,"all")) { if(plus_or_minus=='+') { flag_mask=0xFFFF; /* OR this to open all levels */ } else { flag_mask=0; /* AND this to close all levels*/ } } else { for(i=0;i<DLI_LAST;i++) { if(!strcmp(level_ptr,dbg_level_names[i])) { if(plus_or_minus=='+') { flag_mask=1<<i; /* OR this to open the level */ } else { flag_mask=~(1<<i); /* AND this to close the level */ } break; } } /* didn't find a matching level : skip it. */ if(i==DLI_LAST) { continue; } } /* Set EXIT and ENTRY channels to be identical */ if(!(flag_mask & (1<<DLI_ENTRY))) { flag_mask = flag_mask & (~(1<<DLI_EXIT)); } else { flag_mask = flag_mask | (1<<DLI_EXIT); } /* apply the flag mask to the specified channel */ /* if "all" channel is specified, apply mask to all channels */ if(!strcmp(entry_ptr,"all")) { if(plus_or_minus=='+') { for(i=0;i<DCI_LAST;i++) { dbg_channel_flags[i] |= flag_mask; /* OR to open levels*/ } } else { for(i=0;i<DCI_LAST;i++) { dbg_channel_flags[i] &= flag_mask; /* AND to close levels */ } } } else { for(i=0;i<DCI_LAST;i++) { if(!strcmp(entry_ptr,dbg_channel_names[i])) { if(plus_or_minus=='+') { dbg_channel_flags[i] |= flag_mask; } else { dbg_channel_flags[i] &= flag_mask; } break; } } /* ignore the entry if the channel name is unknown */ } /* done processing this entry; on to the next. */ } free(env_pcache); /* select output file */ env_string = getenv(ENV_FILE); if(env_string && *env_string!='\0') { if(!strcmp(env_string, "stderr")) { output_file = stderr; } else if(!strcmp(env_string, "stdout")) { output_file = stdout; } else { output_file = fopen(env_string,FOPEN_FLAGS); /* if file can't be opened, default to stderr */ if(!output_file) { output_file = stderr; fprintf(stderr, "Can't open %s for writing : debug messages " "will go to stderr. 
Check your PAL_API_TRACING " "variable!\n", env_string); } } } else { output_file = stderr; /* output to stderr by default */ } /* see if we need to disable assertions */ env_string = getenv(ENV_ASSERTS); if(env_string && 0 == strcmp(env_string,"1")) { g_Dbg_asserts_enabled = FALSE; } else { g_Dbg_asserts_enabled = TRUE; } /* select ENTRY level limitation */ env_string = getenv(ENV_ENTRY_LEVELS); if(env_string) { max_entry_level = atoi(env_string); } else { max_entry_level = 1; } /* if necessary, allocate TLS key for entry nesting level */ if(0 != max_entry_level) { if ((ret = pthread_key_create(&entry_level_key,NULL)) != 0) { fprintf(stderr, "ERROR : pthread_key_create() failed error:%d (%s)\n", ret, strerror(ret)); return FALSE; } } InternalInitializeCriticalSection(&fprintf_crit_section); return TRUE; } /*++ Function : DBG_close_channels Stop outputting debug messages by closing the associated file. (no parameters, no return value) --*/ void DBG_close_channels() { if(output_file && output_file != stderr && output_file != stdout) { if (fclose(output_file) != 0) { fprintf(stderr, "ERROR : fclose() failed errno:%d (%s)\n", errno, strerror(errno)); } } output_file = NULL; DeleteCriticalSection(&fprintf_crit_section); /* if necessary, release TLS key for entry nesting level */ if(0 != max_entry_level) { int retval; retval = pthread_key_delete(entry_level_key); if(0 != retval) { fprintf(stderr, "ERROR : pthread_key_delete() returned %d! (%s)\n", retval, strerror(retval)); } } } static const void *DBG_get_module_id() { static const void *s_module_id = NULL; if (s_module_id == NULL) { Dl_info dl_info; if (dladdr((void *) DBG_get_module_id, &dl_info) == 0 || dl_info.dli_sname == NULL) { s_module_id = (void *) -1; } else { s_module_id = dl_info.dli_fbase; } } return s_module_id; } #define MODULE_ID DBG_get_module_id, #define MODULE_FORMAT "-%p" /*++ Function : DBG_printf Internal function for debug channels; don't use. This function outputs a complete debug message, including the function name. Parameters : DBG_CHANNEL_ID channel : debug channel to use DBG_LEVEL_ID level : debug message level BOOL bHeader : whether or not to output message header (thread id, etc) LPCSTR function : current function LPCSTR file : current file INT line : line number LPCSTR format, ... : standard printf parameter list. Return Value : always 1. Notes : This version is for compilers that support the C99 flavor of variable-argument macros but not the gnu flavor, and do not support the __FUNCTION__ pseudo-macro. --*/ int DBG_printf(DBG_CHANNEL_ID channel, DBG_LEVEL_ID level, BOOL bHeader, LPCSTR function, LPCSTR file, INT line, LPCSTR format, ...) { struct ErrnoHolder { int value; ErrnoHolder() : value(errno) { } ~ErrnoHolder() { errno = value; } } errno_holder; CHAR indent[MAX_NESTING+1]; if(!DBG_get_indent(level, format, indent)) { // Note: we will drop log messages here if the indent gets too high, and we won't print // an error when this occurs. 
return 1; } void *thread_id = (void *)THREADSilentGetCurrentThreadId(); CHAR buffer[DBG_BUFFER_SIZE]; INT output_size; if(bHeader) { /* Print file instead of function name for ENTRY messages, because those already include the function name */ /* also print file name for ASSERTs, to match Win32 behavior */ LPCSTR location; if( DLI_ENTRY == level || DLI_ASSERT == level || DLI_EXIT == level) location = file; else location = function; output_size=snprintf(buffer, DBG_BUFFER_SIZE, "{%p" MODULE_FORMAT "} %-5s [%-7s] at %s.%d: ", thread_id, MODULE_ID dbg_level_names[level], dbg_channel_names[channel], location, line); if( output_size < 0) { fprintf(stderr, "ERROR : DBG_printf: snprintf header failed errno:%d (%s)\n", errno, strerror(errno)); output_size = 0; // don't return, just drop the header from the log message } else if (output_size > DBG_BUFFER_SIZE) { output_size = DBG_BUFFER_SIZE; } } else { output_size = 0; } { va_list args; va_start(args, format); INT result = _vsnprintf_s(buffer+output_size, DBG_BUFFER_SIZE-output_size, _TRUNCATE, format, args); va_end(args); if( result < 0 ) { // if we didn't get data from _vsnprintf_s, print an error and exit if ( output_size == 0 || buffer[output_size] == '\0' ) { fprintf(stderr, "ERROR : DBG_printf: vsnprintf_s failed errno:%d (%s)\n", errno, strerror(errno)); return 1; } else if (output_size < DBG_BUFFER_SIZE) { fprintf(stderr, "ERROR : DBG_printf: message truncated, vsnprintf_s failed errno:%d (%s)\n", errno, strerror(errno)); // do not return, print what we have } } else { output_size+=result; } } if( output_size >= DBG_BUFFER_SIZE ) { fprintf(stderr, "ERROR : DBG_printf: message truncated"); } /* Use a Critical section before calling printf code to avoid holding a libc lock while another thread is calling SuspendThread on this one. */ InternalEnterCriticalSection(NULL, &fprintf_crit_section); fprintf( output_file, "%s%s", indent, buffer ); InternalLeaveCriticalSection(NULL, &fprintf_crit_section); /* flush the output to file */ if ( fflush(output_file) != 0 ) { fprintf(stderr, "ERROR : fflush() failed errno:%d (%s)\n", errno, strerror(errno)); } // Some systems support displaying a GUI dialog. We attempt this only for asserts. if ( level == DLI_ASSERT ) PAL_DisplayDialog("PAL ASSERT", buffer); return 1; } /*++ Function : DBG_get_indent generate an indentation string to be used for message output Parameters : DBG_LEVEL_ID level : level of message (DLI_ENTRY, etc) const char *format : printf format string of message char *indent_string : destination for indentation string Return value : TRUE if output can proceed, FALSE otherwise Notes: As a side-effect, this function updates the ENTRY nesting level for the current thread : it decrements it if 'format' contains the string 'return', increments it otherwise (but only if 'level' is DLI_ENTRY). 
The function will return FALSE if the current nesting level is beyond our treshold (max_nesting_level); it always returns TRUE for other message levels --*/ static BOOL DBG_get_indent(DBG_LEVEL_ID level, const char *format, char *indent_string) { int ret; /* determine whether to output an ENTRY line */ if(DLI_ENTRY == level||DLI_EXIT == level) { if(0 != max_entry_level) { INT_PTR nesting; /* Determine if this is an entry or an exit */ if(DLI_EXIT == level) { nesting = (INT_PTR) pthread_getspecific(entry_level_key); /* avoid going negative */ if(nesting != 0) { nesting--; if ((ret = pthread_setspecific(entry_level_key, (LPVOID)nesting)) != 0) { fprintf(stderr, "ERROR : pthread_setspecific() failed " "error:%d (%s)\n", ret, strerror(ret)); } } } else { nesting = (INT_PTR) pthread_getspecific(entry_level_key); if ((ret = pthread_setspecific(entry_level_key, (LPVOID)(nesting+1))) != 0) { fprintf(stderr, "ERROR : pthread_setspecific() failed " "error:%d (%s)\n", ret, strerror(ret)); } } /* see if we're past the level treshold */ if(nesting >= max_entry_level) { return FALSE; } /* generate indentation string */ if(MAX_NESTING < nesting) { nesting = MAX_NESTING; } memset(indent_string,INDENT_CHAR ,nesting); indent_string[nesting] = '\0'; } else { indent_string[0] = '\0'; } } else { indent_string[0] = '\0'; } return TRUE; } /*++ Function : DBG_change_entrylevel retrieve current ENTRY nesting level and [optionnally] modify it Parameters : int new_level : value to which the nesting level must be set, or -1 Return value : nesting level at the time the function was called Notes: if new_level is -1, the nesting level will not be modified --*/ int DBG_change_entrylevel(int new_level) { int old_level; int ret; if(0 == max_entry_level) { return 0; } old_level = PtrToInt(pthread_getspecific(entry_level_key)); if(-1 != new_level) { if ((ret = pthread_setspecific(entry_level_key,(LPVOID)(IntToPtr(new_level)))) != 0) { fprintf(stderr, "ERROR : pthread_setspecific() failed " "error:%d (%s)\n", ret, strerror(ret)); } } return old_level; } #if _DEBUG && defined(__APPLE__) /*++ Function: DBG_ShouldCheckStackAlignment Wires up stack alignment checks (debug builds only) --*/ static const char * PAL_CHECK_ALIGNMENT_MODE = "PAL_CheckAlignmentMode"; enum CheckAlignmentMode { // special value to indicate we've not initialized yet CheckAlignment_Uninitialized = -1, CheckAlignment_Off = 0, CheckAlignment_On = 1, CheckAlignment_Default = CheckAlignment_On }; bool DBG_ShouldCheckStackAlignment() { static CheckAlignmentMode caMode = CheckAlignment_Uninitialized; if (caMode == CheckAlignment_Uninitialized) { char* checkAlignmentSettings; bool shouldFreeCheckAlignmentSettings = false; if (palEnvironment == nullptr) { // This function might be called before the PAL environment is initialized. // In this case, use the system getenv instead. checkAlignmentSettings = ::getenv(PAL_CHECK_ALIGNMENT_MODE); } else { checkAlignmentSettings = EnvironGetenv(PAL_CHECK_ALIGNMENT_MODE); shouldFreeCheckAlignmentSettings = true; } caMode = checkAlignmentSettings ? 
(CheckAlignmentMode)atoi(checkAlignmentSettings) : CheckAlignment_Default; if (checkAlignmentSettings && shouldFreeCheckAlignmentSettings) { free(checkAlignmentSettings); } } return caMode == CheckAlignment_On; } #endif // _DEBUG && __APPLE__ #ifdef __APPLE__ #include "CoreFoundation/CFUserNotification.h" #include "CoreFoundation/CFString.h" #include "Security/AuthSession.h" static const char * PAL_DISPLAY_DIALOG = "PAL_DisplayDialog"; enum DisplayDialogMode { DisplayDialog_Uninitialized = -1, DisplayDialog_Suppress = 0, DisplayDialog_Show = 1, DisplayDialog_Default = DisplayDialog_Suppress, }; /*++ Function : PAL_DisplayDialog Display a simple modal dialog with an alert icon and a single OK button. Caller supplies the title of the dialog and the main text. The dialog is displayed only if the PAL_DisplayDialog environment variable is set to the value "1" and the session has access to the display. --*/ void PAL_DisplayDialog(const char *szTitle, const char *szText) { static DisplayDialogMode dispDialog = DisplayDialog_Uninitialized; if (dispDialog == DisplayDialog_Uninitialized) { char* displayDialog = EnvironGetenv(PAL_DISPLAY_DIALOG); if (displayDialog) { int i = atoi(displayDialog); free(displayDialog); switch (i) { case 0: dispDialog = DisplayDialog_Suppress; break; case 1: dispDialog = DisplayDialog_Show; break; default: // Asserting here would just be re-entrant. :/ dispDialog = DisplayDialog_Default; break; } } else dispDialog = DisplayDialog_Default; if (dispDialog == DisplayDialog_Show) { // We may not be allowed to show. OSStatus osstatus; SecuritySessionId secSession; SessionAttributeBits secSessionInfo; osstatus = SessionGetInfo(callerSecuritySession, &secSession, &secSessionInfo); if (noErr != osstatus || (secSessionInfo & sessionHasGraphicAccess) == 0) dispDialog = DisplayDialog_Suppress; } } if (dispDialog == DisplayDialog_Suppress) return; CFStringRef cfsTitle = CFStringCreateWithCString(kCFAllocatorDefault, szTitle, kCFStringEncodingUTF8); if (cfsTitle != NULL) { CFStringRef cfsText = CFStringCreateWithCString(kCFAllocatorDefault, szText, kCFStringEncodingUTF8); if (cfsText != NULL) { CFOptionFlags response; CFUserNotificationDisplayAlert(0, // Never time-out, wait for user to hit 'OK' 0, // No flags NULL, // Default icon NULL, // Default sound NULL, // No-localization support for text cfsTitle, // Title for dialog cfsText, // The actual alert text NULL, // Default default button title ('OK') NULL, // No alternate button NULL, // No third button &response); // User's response (discarded) CFRelease(cfsText); } CFRelease(cfsTitle); } } /*++ Function : PAL_DisplayDialogFormatted As above but takes a printf-style format string and insertion values to form the main text. --*/ void PAL_DisplayDialogFormatted(const char *szTitle, const char *szTextFormat, ...) { va_list args; va_start(args, szTextFormat); const int cchBuffer = 4096; char *szBuffer = (char*)alloca(cchBuffer); _vsnprintf_s(szBuffer, cchBuffer, _TRUNCATE, szTextFormat, args); PAL_DisplayDialog(szTitle, szBuffer); va_end(args); } #endif // __APPLE__
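DBG_init_channels above parses PAL_DBG_CHANNELS entries of the form "+channel.level" or "-channel.level", separated by ':', where '+' ORs a level bit into that channel's flag word and '-' ANDs it out, with "all" acting as a wildcard on either side. The following is a trimmed standalone sketch of that parsing, with made-up channel/level tables and without the real code's ENTRY/EXIT coupling or error handling; it is not the PAL implementation.

// Sketch of the PAL_DBG_CHANNELS entry syntax handled by DBG_init_channels.
#include <cstdio>
#include <string>
#include <sstream>

static const char* kChannels[] = { "PAL", "FILE", "THREAD" };
static const char* kLevels[]   = { "ENTRY", "TRACE", "WARN", "ERROR", "ASSERT", "EXIT" };

int main()
{
    unsigned flags[3] = { 0, 0, 0 };
    std::stringstream env("+PAL.TRACE:+FILE.all:-PAL.ENTRY");
    std::string entry;
    while (std::getline(env, entry, ':'))
    {
        char sign = entry[0];
        size_t dot = entry.find('.');
        if (dot == std::string::npos) continue;          // entries without '.' are ignored
        std::string channel = entry.substr(1, dot - 1);
        std::string level   = entry.substr(dot + 1);
        for (int c = 0; c < 3; ++c)
        {
            if (channel != "all" && channel != kChannels[c]) continue;
            for (int l = 0; l < 6; ++l)
            {
                if (level != "all" && level != kLevels[l]) continue;
                if (sign == '+') flags[c] |= 1u << l;     // open the level
                else             flags[c] &= ~(1u << l);  // close the level
            }
        }
    }
    for (int c = 0; c < 3; ++c)
        printf("%-6s -> 0x%02x\n", kChannels[c], flags[c]);
    return 0;
}

Running this prints PAL -> 0x02, FILE -> 0x3f, THREAD -> 0x00, which is the per-channel bit mask shape that dbg_channel_flags[] holds in the real code.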
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*++ Module Name: misc/dbgmsg.cpp Abstract: Implementation of Debug Message utilies. Relay channel information, output functions, etc. --*/ /* PAL headers */ #include "pal/thread.hpp" #include "pal/malloc.hpp" #include "pal/file.hpp" #include "config.h" #include "pal/dbgmsg.h" #include "pal/cruntime.h" #include "pal/critsect.h" #include "pal/file.h" #include "pal/environ.h" /* standard headers */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <pthread.h> /* for pthread_self */ #include <errno.h> #include <dirent.h> #include <dlfcn.h> /* <stdarg.h> needs to be included after "palinternal.h" to avoid name collision for va_start and va_end */ #include <stdarg.h> using namespace CorUnix; /* append mode file I/O is safer */ #define _PAL_APPEND_DBG_OUTPUT_ static const char FOPEN_FLAGS[] = "at"; /* number of ENTRY nesting levels to indicate with a '.' */ #define MAX_NESTING 50 /* size of output buffer (arbitrary) */ #define DBG_BUFFER_SIZE 20000 /* global and static variables */ LPCWSTR W16_NULLSTRING = (LPCWSTR) "N\0U\0L\0L\0\0"; DWORD dbg_channel_flags[DCI_LAST]; BOOL g_Dbg_asserts_enabled; /* we must use stdio functions directly rather that rely on PAL functions for output, because those functions do tracing and we need to avoid recursion */ FILE *output_file = NULL; /* master switch for debug channel enablement, to be modified by debugger */ Volatile<BOOL> dbg_master_switch = TRUE; static const char *dbg_channel_names[]= { "PAL", "LOADER", "HANDLE", "SHMEM", "PROCESS", "THREAD", "EXCEPT", "CRT", "UNICODE", "ARCH", "SYNC", "FILE", "VIRTUAL", "MEM", "SOCKET", "DEBUG", "LOCALE", "MISC", "MUTEX", "CRITSEC", "POLL", "CRYPT", "SHFOLDER" , "SXS" , "DCI_NUMA" }; // Verify the number of elements in dbg_channel_names static_assert_no_msg(ARRAY_SIZE(dbg_channel_names) == DCI_LAST); static const char *dbg_level_names[]= { "ENTRY", "TRACE", "WARN", "ERROR", "ASSERT", "EXIT" }; static const char ENV_FILE[]="PAL_API_TRACING"; static const char ENV_CHANNELS[]="PAL_DBG_CHANNELS"; static const char ENV_ASSERTS[]="PAL_DISABLE_ASSERTS"; static const char ENV_ENTRY_LEVELS[]="PAL_API_LEVELS"; /* per-thread storage for ENTRY tracing level */ static pthread_key_t entry_level_key; /* entry level limitation */ static int max_entry_level; /* character to use for ENTRY indentation */ static const char INDENT_CHAR = '.'; static BOOL DBG_get_indent(DBG_LEVEL_ID level, const char *format, char *indent_string); static CRITICAL_SECTION fprintf_crit_section; /* Function definitions */ /*++ Function : DBG_init_channels Parse environment variables PAL_DBG_CHANNELS and PAL_API_TRACING for debug channel settings; initialize static variables. 
(no parameters, no return value) --*/ BOOL DBG_init_channels(void) { INT i; LPSTR env_string; LPSTR env_workstring; LPSTR env_pcache; LPSTR entry_ptr; LPSTR level_ptr; CHAR plus_or_minus; DWORD flag_mask = 0; int ret; /* output only asserts by default [only affects no-vararg-support case; if we have varargs, these flags aren't even checked for ASSERTs] */ for(i=0;i<DCI_LAST;i++) dbg_channel_flags[i]=1<<DLI_ASSERT; /* parse PAL_DBG_CHANNELS environment variable */ env_string = getenv(ENV_CHANNELS); if (env_string != NULL) { env_string = strdup(env_string); } env_pcache = env_workstring = env_string; while(env_workstring) { entry_ptr=env_workstring; /* find beginning of next entry */ while((*entry_ptr != '\0') &&(*entry_ptr != '+') && (*entry_ptr != '-')) { entry_ptr++; } /* break if end of string is reached */ if(*entry_ptr == '\0') { break; } plus_or_minus=*entry_ptr++; /* find end of entry; if strchr returns NULL, we have reached the end of the string and we will leave the loop at the end of this pass. */ env_workstring=strchr(entry_ptr,':'); /* NULL-terminate entry, make env_string point to rest of string */ if(env_workstring) { *env_workstring++='\0'; } /* find period that separates channel name from level name */ level_ptr=strchr(entry_ptr,'.'); /* an entry with no period is illegal : ignore it */ if(!level_ptr) { continue; } /* NULL-terminate channel name, make level_ptr point to the level name */ *level_ptr++='\0'; /* build the flag mask based on requested level */ /* if "all" level is specified, we want to open/close all levels at once, so mask is either all ones or all zeroes */ if(!strcmp(level_ptr,"all")) { if(plus_or_minus=='+') { flag_mask=0xFFFF; /* OR this to open all levels */ } else { flag_mask=0; /* AND this to close all levels*/ } } else { for(i=0;i<DLI_LAST;i++) { if(!strcmp(level_ptr,dbg_level_names[i])) { if(plus_or_minus=='+') { flag_mask=1<<i; /* OR this to open the level */ } else { flag_mask=~(1<<i); /* AND this to close the level */ } break; } } /* didn't find a matching level : skip it. */ if(i==DLI_LAST) { continue; } } /* Set EXIT and ENTRY channels to be identical */ if(!(flag_mask & (1<<DLI_ENTRY))) { flag_mask = flag_mask & (~(1<<DLI_EXIT)); } else { flag_mask = flag_mask | (1<<DLI_EXIT); } /* apply the flag mask to the specified channel */ /* if "all" channel is specified, apply mask to all channels */ if(!strcmp(entry_ptr,"all")) { if(plus_or_minus=='+') { for(i=0;i<DCI_LAST;i++) { dbg_channel_flags[i] |= flag_mask; /* OR to open levels*/ } } else { for(i=0;i<DCI_LAST;i++) { dbg_channel_flags[i] &= flag_mask; /* AND to close levels */ } } } else { for(i=0;i<DCI_LAST;i++) { if(!strcmp(entry_ptr,dbg_channel_names[i])) { if(plus_or_minus=='+') { dbg_channel_flags[i] |= flag_mask; } else { dbg_channel_flags[i] &= flag_mask; } break; } } /* ignore the entry if the channel name is unknown */ } /* done processing this entry; on to the next. */ } free(env_pcache); /* select output file */ env_string = getenv(ENV_FILE); if(env_string && *env_string!='\0') { if(!strcmp(env_string, "stderr")) { output_file = stderr; } else if(!strcmp(env_string, "stdout")) { output_file = stdout; } else { output_file = fopen(env_string,FOPEN_FLAGS); /* if file can't be opened, default to stderr */ if(!output_file) { output_file = stderr; fprintf(stderr, "Can't open %s for writing : debug messages " "will go to stderr. 
Check your PAL_API_TRACING " "variable!\n", env_string); } } } else { output_file = stderr; /* output to stderr by default */ } /* see if we need to disable assertions */ env_string = getenv(ENV_ASSERTS); if(env_string && 0 == strcmp(env_string,"1")) { g_Dbg_asserts_enabled = FALSE; } else { g_Dbg_asserts_enabled = TRUE; } /* select ENTRY level limitation */ env_string = getenv(ENV_ENTRY_LEVELS); if(env_string) { max_entry_level = atoi(env_string); } else { max_entry_level = 1; } /* if necessary, allocate TLS key for entry nesting level */ if(0 != max_entry_level) { if ((ret = pthread_key_create(&entry_level_key,NULL)) != 0) { fprintf(stderr, "ERROR : pthread_key_create() failed error:%d (%s)\n", ret, strerror(ret)); return FALSE; } } InternalInitializeCriticalSection(&fprintf_crit_section); return TRUE; } /*++ Function : DBG_close_channels Stop outputting debug messages by closing the associated file. (no parameters, no return value) --*/ void DBG_close_channels() { if(output_file && output_file != stderr && output_file != stdout) { if (fclose(output_file) != 0) { fprintf(stderr, "ERROR : fclose() failed errno:%d (%s)\n", errno, strerror(errno)); } } output_file = NULL; DeleteCriticalSection(&fprintf_crit_section); /* if necessary, release TLS key for entry nesting level */ if(0 != max_entry_level) { int retval; retval = pthread_key_delete(entry_level_key); if(0 != retval) { fprintf(stderr, "ERROR : pthread_key_delete() returned %d! (%s)\n", retval, strerror(retval)); } } } static const void *DBG_get_module_id() { static const void *s_module_id = NULL; if (s_module_id == NULL) { Dl_info dl_info; if (dladdr((void *) DBG_get_module_id, &dl_info) == 0 || dl_info.dli_sname == NULL) { s_module_id = (void *) -1; } else { s_module_id = dl_info.dli_fbase; } } return s_module_id; } #define MODULE_ID DBG_get_module_id, #define MODULE_FORMAT "-%p" /*++ Function : DBG_printf Internal function for debug channels; don't use. This function outputs a complete debug message, including the function name. Parameters : DBG_CHANNEL_ID channel : debug channel to use DBG_LEVEL_ID level : debug message level BOOL bHeader : whether or not to output message header (thread id, etc) LPCSTR function : current function LPCSTR file : current file INT line : line number LPCSTR format, ... : standard printf parameter list. Return Value : always 1. Notes : This version is for compilers that support the C99 flavor of variable-argument macros but not the gnu flavor, and do not support the __FUNCTION__ pseudo-macro. --*/ int DBG_printf(DBG_CHANNEL_ID channel, DBG_LEVEL_ID level, BOOL bHeader, LPCSTR function, LPCSTR file, INT line, LPCSTR format, ...) { struct ErrnoHolder { int value; ErrnoHolder() : value(errno) { } ~ErrnoHolder() { errno = value; } } errno_holder; CHAR indent[MAX_NESTING+1]; if(!DBG_get_indent(level, format, indent)) { // Note: we will drop log messages here if the indent gets too high, and we won't print // an error when this occurs. 
return 1; } void *thread_id = (void *)THREADSilentGetCurrentThreadId(); CHAR buffer[DBG_BUFFER_SIZE]; INT output_size; if(bHeader) { /* Print file instead of function name for ENTRY messages, because those already include the function name */ /* also print file name for ASSERTs, to match Win32 behavior */ LPCSTR location; if( DLI_ENTRY == level || DLI_ASSERT == level || DLI_EXIT == level) location = file; else location = function; output_size=snprintf(buffer, DBG_BUFFER_SIZE, "{%p" MODULE_FORMAT "} %-5s [%-7s] at %s.%d: ", thread_id, MODULE_ID dbg_level_names[level], dbg_channel_names[channel], location, line); if( output_size < 0) { fprintf(stderr, "ERROR : DBG_printf: snprintf header failed errno:%d (%s)\n", errno, strerror(errno)); output_size = 0; // don't return, just drop the header from the log message } else if (output_size > DBG_BUFFER_SIZE) { output_size = DBG_BUFFER_SIZE; } } else { output_size = 0; } { va_list args; va_start(args, format); INT result = _vsnprintf_s(buffer+output_size, DBG_BUFFER_SIZE-output_size, _TRUNCATE, format, args); va_end(args); if( result < 0 ) { // if we didn't get data from _vsnprintf_s, print an error and exit if ( output_size == 0 || buffer[output_size] == '\0' ) { fprintf(stderr, "ERROR : DBG_printf: vsnprintf_s failed errno:%d (%s)\n", errno, strerror(errno)); return 1; } else if (output_size < DBG_BUFFER_SIZE) { fprintf(stderr, "ERROR : DBG_printf: message truncated, vsnprintf_s failed errno:%d (%s)\n", errno, strerror(errno)); // do not return, print what we have } } else { output_size+=result; } } if( output_size >= DBG_BUFFER_SIZE ) { fprintf(stderr, "ERROR : DBG_printf: message truncated"); } /* Use a Critical section before calling printf code to avoid holding a libc lock while another thread is calling SuspendThread on this one. */ InternalEnterCriticalSection(NULL, &fprintf_crit_section); fprintf( output_file, "%s%s", indent, buffer ); InternalLeaveCriticalSection(NULL, &fprintf_crit_section); /* flush the output to file */ if ( fflush(output_file) != 0 ) { fprintf(stderr, "ERROR : fflush() failed errno:%d (%s)\n", errno, strerror(errno)); } // Some systems support displaying a GUI dialog. We attempt this only for asserts. if ( level == DLI_ASSERT ) PAL_DisplayDialog("PAL ASSERT", buffer); return 1; } /*++ Function : DBG_get_indent generate an indentation string to be used for message output Parameters : DBG_LEVEL_ID level : level of message (DLI_ENTRY, etc) const char *format : printf format string of message char *indent_string : destination for indentation string Return value : TRUE if output can proceed, FALSE otherwise Notes: As a side-effect, this function updates the ENTRY nesting level for the current thread : it decrements it if 'format' contains the string 'return', increments it otherwise (but only if 'level' is DLI_ENTRY). 
The function will return FALSE if the current nesting level is beyond our treshold (max_nesting_level); it always returns TRUE for other message levels --*/ static BOOL DBG_get_indent(DBG_LEVEL_ID level, const char *format, char *indent_string) { int ret; /* determine whether to output an ENTRY line */ if(DLI_ENTRY == level||DLI_EXIT == level) { if(0 != max_entry_level) { INT_PTR nesting; /* Determine if this is an entry or an exit */ if(DLI_EXIT == level) { nesting = (INT_PTR) pthread_getspecific(entry_level_key); /* avoid going negative */ if(nesting != 0) { nesting--; if ((ret = pthread_setspecific(entry_level_key, (LPVOID)nesting)) != 0) { fprintf(stderr, "ERROR : pthread_setspecific() failed " "error:%d (%s)\n", ret, strerror(ret)); } } } else { nesting = (INT_PTR) pthread_getspecific(entry_level_key); if ((ret = pthread_setspecific(entry_level_key, (LPVOID)(nesting+1))) != 0) { fprintf(stderr, "ERROR : pthread_setspecific() failed " "error:%d (%s)\n", ret, strerror(ret)); } } /* see if we're past the level treshold */ if(nesting >= max_entry_level) { return FALSE; } /* generate indentation string */ if(MAX_NESTING < nesting) { nesting = MAX_NESTING; } memset(indent_string,INDENT_CHAR ,nesting); indent_string[nesting] = '\0'; } else { indent_string[0] = '\0'; } } else { indent_string[0] = '\0'; } return TRUE; } /*++ Function : DBG_change_entrylevel retrieve current ENTRY nesting level and [optionnally] modify it Parameters : int new_level : value to which the nesting level must be set, or -1 Return value : nesting level at the time the function was called Notes: if new_level is -1, the nesting level will not be modified --*/ int DBG_change_entrylevel(int new_level) { int old_level; int ret; if(0 == max_entry_level) { return 0; } old_level = PtrToInt(pthread_getspecific(entry_level_key)); if(-1 != new_level) { if ((ret = pthread_setspecific(entry_level_key,(LPVOID)(IntToPtr(new_level)))) != 0) { fprintf(stderr, "ERROR : pthread_setspecific() failed " "error:%d (%s)\n", ret, strerror(ret)); } } return old_level; } #if _DEBUG && defined(__APPLE__) /*++ Function: DBG_ShouldCheckStackAlignment Wires up stack alignment checks (debug builds only) --*/ static const char * PAL_CHECK_ALIGNMENT_MODE = "PAL_CheckAlignmentMode"; enum CheckAlignmentMode { // special value to indicate we've not initialized yet CheckAlignment_Uninitialized = -1, CheckAlignment_Off = 0, CheckAlignment_On = 1, CheckAlignment_Default = CheckAlignment_On }; bool DBG_ShouldCheckStackAlignment() { static CheckAlignmentMode caMode = CheckAlignment_Uninitialized; if (caMode == CheckAlignment_Uninitialized) { char* checkAlignmentSettings; bool shouldFreeCheckAlignmentSettings = false; if (palEnvironment == nullptr) { // This function might be called before the PAL environment is initialized. // In this case, use the system getenv instead. checkAlignmentSettings = ::getenv(PAL_CHECK_ALIGNMENT_MODE); } else { checkAlignmentSettings = EnvironGetenv(PAL_CHECK_ALIGNMENT_MODE); shouldFreeCheckAlignmentSettings = true; } caMode = checkAlignmentSettings ? 
(CheckAlignmentMode)atoi(checkAlignmentSettings) : CheckAlignment_Default; if (checkAlignmentSettings && shouldFreeCheckAlignmentSettings) { free(checkAlignmentSettings); } } return caMode == CheckAlignment_On; } #endif // _DEBUG && __APPLE__ #ifdef __APPLE__ #include "CoreFoundation/CFUserNotification.h" #include "CoreFoundation/CFString.h" #include "Security/AuthSession.h" static const char * PAL_DISPLAY_DIALOG = "PAL_DisplayDialog"; enum DisplayDialogMode { DisplayDialog_Uninitialized = -1, DisplayDialog_Suppress = 0, DisplayDialog_Show = 1, DisplayDialog_Default = DisplayDialog_Suppress, }; /*++ Function : PAL_DisplayDialog Display a simple modal dialog with an alert icon and a single OK button. Caller supplies the title of the dialog and the main text. The dialog is displayed only if the PAL_DisplayDialog environment variable is set to the value "1" and the session has access to the display. --*/ void PAL_DisplayDialog(const char *szTitle, const char *szText) { static DisplayDialogMode dispDialog = DisplayDialog_Uninitialized; if (dispDialog == DisplayDialog_Uninitialized) { char* displayDialog = EnvironGetenv(PAL_DISPLAY_DIALOG); if (displayDialog) { int i = atoi(displayDialog); free(displayDialog); switch (i) { case 0: dispDialog = DisplayDialog_Suppress; break; case 1: dispDialog = DisplayDialog_Show; break; default: // Asserting here would just be re-entrant. :/ dispDialog = DisplayDialog_Default; break; } } else dispDialog = DisplayDialog_Default; if (dispDialog == DisplayDialog_Show) { // We may not be allowed to show. OSStatus osstatus; SecuritySessionId secSession; SessionAttributeBits secSessionInfo; osstatus = SessionGetInfo(callerSecuritySession, &secSession, &secSessionInfo); if (noErr != osstatus || (secSessionInfo & sessionHasGraphicAccess) == 0) dispDialog = DisplayDialog_Suppress; } } if (dispDialog == DisplayDialog_Suppress) return; CFStringRef cfsTitle = CFStringCreateWithCString(kCFAllocatorDefault, szTitle, kCFStringEncodingUTF8); if (cfsTitle != NULL) { CFStringRef cfsText = CFStringCreateWithCString(kCFAllocatorDefault, szText, kCFStringEncodingUTF8); if (cfsText != NULL) { CFOptionFlags response; CFUserNotificationDisplayAlert(0, // Never time-out, wait for user to hit 'OK' 0, // No flags NULL, // Default icon NULL, // Default sound NULL, // No-localization support for text cfsTitle, // Title for dialog cfsText, // The actual alert text NULL, // Default default button title ('OK') NULL, // No alternate button NULL, // No third button &response); // User's response (discarded) CFRelease(cfsText); } CFRelease(cfsTitle); } } /*++ Function : PAL_DisplayDialogFormatted As above but takes a printf-style format string and insertion values to form the main text. --*/ void PAL_DisplayDialogFormatted(const char *szTitle, const char *szTextFormat, ...) { va_list args; va_start(args, szTextFormat); const int cchBuffer = 4096; char *szBuffer = (char*)alloca(cchBuffer); _vsnprintf_s(szBuffer, cchBuffer, _TRUNCATE, szTextFormat, args); PAL_DisplayDialog(szTitle, szBuffer); va_end(args); } #endif // __APPLE__
-1
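The debug-channel code in the row above parses a PAL_DBG_CHANNELS environment variable made of "<+|->channel.level" entries separated by ':', with "all" accepted for either the channel or the level, and selects an output file from a second variable. A hedged usage sketch follows; the concrete channel and level spellings live in dbg_channel_names/dbg_level_names, which are not included in this excerpt, so "loader" and "trace" below are illustrative placeholders, and PAL_API_TRACING is only assumed to be the output-file variable based on the error message printed when the file cannot be opened.

```c
/* Hedged sketch of driving the parser above from a test harness. The entry
 * grammar is taken from DBG_init_channels; "loader" and "trace" are
 * illustrative placeholders, and PAL_API_TRACING is an assumed variable name. */
#include <stdlib.h>

void configure_pal_tracing(void)
{
    /* open every level on every channel, then close one hypothetical level */
    setenv("PAL_DBG_CHANNELS", "+all.all:-loader.trace", 1);

    /* "stderr", "stdout", or a writable file path are accepted by the code above */
    setenv("PAL_API_TRACING", "stderr", 1);
}
```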
dotnet/runtime
66,474
Update repo CMake configuration to target /W4 by default
Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
AaronRobinsonMSFT
2022-03-10T20:20:31Z
2022-03-11T04:59:21Z
ba5207b0f4054fe8963fc3e87c2bd32907516961
5ce2b9f860f7a84c3059650bb67817d59d8f4953
Update repo CMake configuration to target /W4 by default. Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
./src/coreclr/pal/tests/palsuite/threading/GetCurrentThread/test2/test2.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================================= ** ** Source: test2.c ** ** Dependencies: PAL_Initialize ** PAL_Terminate ** Fail ** CreateThread ** SetThreadPriority ** GetThreadPriority ** ResumeThread ** WaitForSingleObject ** GetLastError ** ** Purpose: ** ** Test to ensure proper operation of the GetCurrentThread() ** API. The test launches a thread in suspended mode, and sets ** its priority to a non-default value using the handle returned ** by CreateThread(). The new thread calls GetCurrentThred() to ** retrieve a handle to itself, and calls GetThreadPriority() ** to verify that its priority matches what it was set to on ** the main execution thread. ** ** **===========================================================================*/ #include <palsuite.h> /* we store the return code from the child thread here because */ /* we're missing the GetExitCodeThread() API */ static int g_priority = 0; /** * ThreadFunc * * Thread function that calls GetCurrentThread() to get a pseudo-handle * to itself, then checks its priority and exits with that value. */ DWORD PALAPI ThreadFunc_GetCurrentThread_test2( LPVOID param ) { int priority; HANDLE hThread; /* call GetCurrentThread() to get a pseudo-handle to */ /* the current thread */ hThread = GetCurrentThread(); if( hThread == NULL ) { Fail( "GetCurrentThread() call failed\n" ); } /* get the current thread priority */ priority = GetThreadPriority( hThread ); if( priority == THREAD_PRIORITY_ERROR_RETURN ) { /* GetThreadPriority call failed */ Fail( "ERROR:%lu:GetThreadPriority() call failed\n", GetLastError() ); } /* store this globally because we don't have GetExitCodeThread() */ g_priority = priority; return (DWORD)priority; } /** * main * * executable entry point */ PALTEST(threading_GetCurrentThread_test2_paltest_getcurrentthread_test2, "threading/GetCurrentThread/test2/paltest_getcurrentthread_test2") { HANDLE hThread = NULL; DWORD IDThread; DWORD dwRet; SIZE_T i = 0; /* PAL initialization */ if( (PAL_Initialize(argc, argv)) != 0 ) { return( FAIL ); } #if !HAVE_SCHED_OTHER_ASSIGNABLE /* Defining thread priority for SCHED_OTHER is implementation defined. Some platforms like NetBSD cannot reassign it as they are dynamic. */ printf("paltest_getcurrentthread_test2 has been disabled on this platform\n"); #else /* Create multiple threads. */ hThread = CreateThread( NULL, /* no security attributes */ 0, /* use default stack size */ (LPTHREAD_START_ROUTINE) ThreadFunc_GetCurrentThread_test2, /* thread function */ (LPVOID) i, /* pass thread index as */ /* function argument */ CREATE_SUSPENDED, /* create suspended */ &IDThread ); /* returns thread identifier */ /* Check the return value for success. */ if( hThread == NULL ) { /* ERROR */ Fail( "ERROR:%lu:CreateThread failed\n", GetLastError() ); } /* set the thread priority of the new thread to the highest value */ if( ! 
SetThreadPriority( hThread, THREAD_PRIORITY_TIME_CRITICAL) ) { Fail( "ERROR:%lu:SetThreadPriority() call failed\n", GetLastError() ); } /* let the child thread run now */ ResumeThread( hThread ); /* wait for the thread to finish */ dwRet = WaitForSingleObject( hThread, INFINITE ); if( dwRet == WAIT_FAILED ) { /* ERROR */ Fail( "ERROR:%lu:WaitForSingleObject call failed\n", GetLastError() ); } /* validate the thread's exit code */ if( g_priority != THREAD_PRIORITY_TIME_CRITICAL ) { /* ERROR */ Fail( "FAIL:Unexpected thread priority %d returned, expected %d\n", g_priority, THREAD_PRIORITY_TIME_CRITICAL ); } #endif PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================================= ** ** Source: test2.c ** ** Dependencies: PAL_Initialize ** PAL_Terminate ** Fail ** CreateThread ** SetThreadPriority ** GetThreadPriority ** ResumeThread ** WaitForSingleObject ** GetLastError ** ** Purpose: ** ** Test to ensure proper operation of the GetCurrentThread() ** API. The test launches a thread in suspended mode, and sets ** its priority to a non-default value using the handle returned ** by CreateThread(). The new thread calls GetCurrentThred() to ** retrieve a handle to itself, and calls GetThreadPriority() ** to verify that its priority matches what it was set to on ** the main execution thread. ** ** **===========================================================================*/ #include <palsuite.h> /* we store the return code from the child thread here because */ /* we're missing the GetExitCodeThread() API */ static int g_priority = 0; /** * ThreadFunc * * Thread function that calls GetCurrentThread() to get a pseudo-handle * to itself, then checks its priority and exits with that value. */ DWORD PALAPI ThreadFunc_GetCurrentThread_test2( LPVOID param ) { int priority; HANDLE hThread; /* call GetCurrentThread() to get a pseudo-handle to */ /* the current thread */ hThread = GetCurrentThread(); if( hThread == NULL ) { Fail( "GetCurrentThread() call failed\n" ); } /* get the current thread priority */ priority = GetThreadPriority( hThread ); if( priority == THREAD_PRIORITY_ERROR_RETURN ) { /* GetThreadPriority call failed */ Fail( "ERROR:%lu:GetThreadPriority() call failed\n", GetLastError() ); } /* store this globally because we don't have GetExitCodeThread() */ g_priority = priority; return (DWORD)priority; } /** * main * * executable entry point */ PALTEST(threading_GetCurrentThread_test2_paltest_getcurrentthread_test2, "threading/GetCurrentThread/test2/paltest_getcurrentthread_test2") { HANDLE hThread = NULL; DWORD IDThread; DWORD dwRet; SIZE_T i = 0; /* PAL initialization */ if( (PAL_Initialize(argc, argv)) != 0 ) { return( FAIL ); } #if !HAVE_SCHED_OTHER_ASSIGNABLE /* Defining thread priority for SCHED_OTHER is implementation defined. Some platforms like NetBSD cannot reassign it as they are dynamic. */ printf("paltest_getcurrentthread_test2 has been disabled on this platform\n"); #else /* Create multiple threads. */ hThread = CreateThread( NULL, /* no security attributes */ 0, /* use default stack size */ (LPTHREAD_START_ROUTINE) ThreadFunc_GetCurrentThread_test2, /* thread function */ (LPVOID) i, /* pass thread index as */ /* function argument */ CREATE_SUSPENDED, /* create suspended */ &IDThread ); /* returns thread identifier */ /* Check the return value for success. */ if( hThread == NULL ) { /* ERROR */ Fail( "ERROR:%lu:CreateThread failed\n", GetLastError() ); } /* set the thread priority of the new thread to the highest value */ if( ! 
SetThreadPriority( hThread, THREAD_PRIORITY_TIME_CRITICAL) ) { Fail( "ERROR:%lu:SetThreadPriority() call failed\n", GetLastError() ); } /* let the child thread run now */ ResumeThread( hThread ); /* wait for the thread to finish */ dwRet = WaitForSingleObject( hThread, INFINITE ); if( dwRet == WAIT_FAILED ) { /* ERROR */ Fail( "ERROR:%lu:WaitForSingleObject call failed\n", GetLastError() ); } /* validate the thread's exit code */ if( g_priority != THREAD_PRIORITY_TIME_CRITICAL ) { /* ERROR */ Fail( "FAIL:Unexpected thread priority %d returned, expected %d\n", g_priority, THREAD_PRIORITY_TIME_CRITICAL ); } #endif PAL_Terminate(); return PASS; }
-1
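The test in the row above stores the child thread's priority in a global because, as its comment notes, GetExitCodeThread() is not available in the PAL. For contrast, a hedged sketch of the ordinary Win32 pattern is shown below; it is standard Win32 usage, not part of the PAL test suite.

```c
/* Hedged Win32 contrast sketch (not PAL code): outside the PAL the thread's
 * return value would normally be read back with GetExitCodeThread() instead of
 * being smuggled through a global variable. */
#include <windows.h>

DWORD QueryThreadResult(HANDLE hThread)
{
    DWORD exitCode = (DWORD)-1;              /* sentinel returned on failure */

    WaitForSingleObject(hThread, INFINITE);  /* ensure the thread has returned */
    if (!GetExitCodeThread(hThread, &exitCode))
    {
        /* real code would report GetLastError(); keep the sentinel here */
    }
    return exitCode;
}
```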
dotnet/runtime
66,474
Update repo CMake configuration to target /W4 by default
Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
AaronRobinsonMSFT
2022-03-10T20:20:31Z
2022-03-11T04:59:21Z
ba5207b0f4054fe8963fc3e87c2bd32907516961
5ce2b9f860f7a84c3059650bb67817d59d8f4953
Update repo CMake configuration to target /W4 by default. Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
./src/coreclr/pal/tests/palsuite/miscellaneous/FormatMessageW/test1/test.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================ ** ** Source : test.c ** ** Purpose: Test for FormatMessageW() function ** ** **=========================================================*/ #define UNICODE #include <palsuite.h> PALTEST(miscellaneous_FormatMessageW_test1_paltest_formatmessagew_test1, "miscellaneous/FormatMessageW/test1/paltest_formatmessagew_test1") { WCHAR TheString[] = {'P','a','l',' ','T','e','s','t','\0'}; WCHAR OutBuffer[128]; int ReturnResult; /* * Initialize the PAL and return FAILURE if this fails */ if(0 != (PAL_Initialize(argc, argv))) { return FAIL; } ReturnResult = FormatMessage( FORMAT_MESSAGE_FROM_STRING, /* source and processing options */ TheString, /* message source */ 0, /* message identifier */ 0, /* language identifier */ OutBuffer, /* message buffer */ 1024, /* maximum size of message buffer */ NULL /* array of message inserts */ ); if(ReturnResult == 0) { Fail("ERROR: The return value was 0, which indicates failure. " "The function failed when trying to Format a simple string" ", with no formatters in it."); } if(memcmp(OutBuffer,TheString,wcslen(OutBuffer)*2+2) != 0) { Fail("ERROR: The formatted string should be %s but is really %s.", convertC(TheString), convertC(OutBuffer)); } PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================ ** ** Source : test.c ** ** Purpose: Test for FormatMessageW() function ** ** **=========================================================*/ #define UNICODE #include <palsuite.h> PALTEST(miscellaneous_FormatMessageW_test1_paltest_formatmessagew_test1, "miscellaneous/FormatMessageW/test1/paltest_formatmessagew_test1") { WCHAR TheString[] = {'P','a','l',' ','T','e','s','t','\0'}; WCHAR OutBuffer[128]; int ReturnResult; /* * Initialize the PAL and return FAILURE if this fails */ if(0 != (PAL_Initialize(argc, argv))) { return FAIL; } ReturnResult = FormatMessage( FORMAT_MESSAGE_FROM_STRING, /* source and processing options */ TheString, /* message source */ 0, /* message identifier */ 0, /* language identifier */ OutBuffer, /* message buffer */ 1024, /* maximum size of message buffer */ NULL /* array of message inserts */ ); if(ReturnResult == 0) { Fail("ERROR: The return value was 0, which indicates failure. " "The function failed when trying to Format a simple string" ", with no formatters in it."); } if(memcmp(OutBuffer,TheString,wcslen(OutBuffer)*2+2) != 0) { Fail("ERROR: The formatted string should be %s but is really %s.", convertC(TheString), convertC(OutBuffer)); } PAL_Terminate(); return PASS; }
-1
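The FormatMessageW test above exercises FORMAT_MESSAGE_FROM_STRING with a source string that contains no inserts. As a hedged companion, the sketch below shows the standard Win32 way to combine the same flag with FORMAT_MESSAGE_ARGUMENT_ARRAY so that a "%1"-style insert is substituted; it is generic Win32 usage, not part of the PAL test.

```c
/* Hedged companion sketch (standard Win32 usage, not part of the PAL test):
 * FORMAT_MESSAGE_FROM_STRING plus FORMAT_MESSAGE_ARGUMENT_ARRAY substitutes the
 * "%1!s!" insert from a DWORD_PTR array instead of a va_list. */
#include <windows.h>

DWORD FormatWithInsert(WCHAR *out, DWORD cchOut)
{
    DWORD_PTR inserts[] = { (DWORD_PTR)L"Pal Test" };

    /* returns the number of characters written, or 0 on failure */
    return FormatMessageW(FORMAT_MESSAGE_FROM_STRING | FORMAT_MESSAGE_ARGUMENT_ARRAY,
                          L"Running: %1!s!",  /* message source with one insert */
                          0,                  /* message id unused for FROM_STRING */
                          0,                  /* language id unused here */
                          out,
                          cchOut,
                          (va_list *)inserts);
}
```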
dotnet/runtime
66,474
Update repo CMake configuration to target /W4 by default
Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
AaronRobinsonMSFT
2022-03-10T20:20:31Z
2022-03-11T04:59:21Z
ba5207b0f4054fe8963fc3e87c2bd32907516961
5ce2b9f860f7a84c3059650bb67817d59d8f4953
Update repo CMake configuration to target /W4 by default. Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
./src/coreclr/binder/applicationcontext.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // ============================================================ // // ApplicationContext.cpp // // // Implements the ApplicationContext class // // ============================================================ #include "applicationcontext.hpp" #include "stringarraylist.h" #include "loadcontext.hpp" #include "failurecache.hpp" #include "assemblyidentitycache.hpp" #include "utils.hpp" #include "ex.h" #include "clr/fs/path.h" using namespace clr::fs; namespace BINDER_SPACE { ApplicationContext::ApplicationContext() { m_pExecutionContext = NULL; m_pFailureCache = NULL; m_contextCS = NULL; m_pTrustedPlatformAssemblyMap = nullptr; } ApplicationContext::~ApplicationContext() { SAFE_DELETE(m_pExecutionContext); SAFE_DELETE(m_pFailureCache); if (m_contextCS != NULL) { ClrDeleteCriticalSection(m_contextCS); } if (m_pTrustedPlatformAssemblyMap != nullptr) { delete m_pTrustedPlatformAssemblyMap; } } HRESULT ApplicationContext::Init() { HRESULT hr = S_OK; NewHolder<ExecutionContext> pExecutionContext; FailureCache *pFailureCache = NULL; // Allocate context objects SAFE_NEW(pExecutionContext, ExecutionContext); SAFE_NEW(pFailureCache, FailureCache); m_contextCS = ClrCreateCriticalSection( CrstFusionAppCtx, CRST_REENTRANCY); if (!m_contextCS) { SAFE_DELETE(pFailureCache); hr = E_OUTOFMEMORY; } else { m_pExecutionContext = pExecutionContext.Extract(); m_pFailureCache = pFailureCache; } Exit: return hr; } HRESULT ApplicationContext::SetupBindingPaths(SString &sTrustedPlatformAssemblies, SString &sPlatformResourceRoots, SString &sAppPaths, BOOL fAcquireLock) { HRESULT hr = S_OK; CRITSEC_Holder contextLock(fAcquireLock ? GetCriticalSectionCookie() : NULL); if (m_pTrustedPlatformAssemblyMap != nullptr) { GO_WITH_HRESULT(S_OK); } // // Parse TrustedPlatformAssemblies // m_pTrustedPlatformAssemblyMap = new SimpleNameToFileNameMap(); sTrustedPlatformAssemblies.Normalize(); for (SString::Iterator i = sTrustedPlatformAssemblies.Begin(); i != sTrustedPlatformAssemblies.End(); ) { SString fileName; SString simpleName; bool isNativeImage = false; HRESULT pathResult = S_OK; IF_FAIL_GO(pathResult = GetNextTPAPath(sTrustedPlatformAssemblies, i, /*dllOnly*/ false, fileName, simpleName, isNativeImage)); if (pathResult == S_FALSE) { break; } const SimpleNameToFileNameMapEntry *pExistingEntry = m_pTrustedPlatformAssemblyMap->LookupPtr(simpleName.GetUnicode()); if (pExistingEntry != nullptr) { // // We want to store only the first entry matching a simple name we encounter. // The exception is if we first store an IL reference and later in the string // we encounter a native image. Since we don't touch IL in the presence of // native images, we replace the IL entry with the NI. 
// if ((pExistingEntry->m_wszILFileName != nullptr && !isNativeImage) || (pExistingEntry->m_wszNIFileName != nullptr && isNativeImage)) { continue; } } LPWSTR wszSimpleName = nullptr; if (pExistingEntry == nullptr) { wszSimpleName = new WCHAR[simpleName.GetCount() + 1]; if (wszSimpleName == nullptr) { GO_WITH_HRESULT(E_OUTOFMEMORY); } wcscpy_s(wszSimpleName, simpleName.GetCount() + 1, simpleName.GetUnicode()); } else { wszSimpleName = pExistingEntry->m_wszSimpleName; } LPWSTR wszFileName = new WCHAR[fileName.GetCount() + 1]; if (wszFileName == nullptr) { GO_WITH_HRESULT(E_OUTOFMEMORY); } wcscpy_s(wszFileName, fileName.GetCount() + 1, fileName.GetUnicode()); SimpleNameToFileNameMapEntry mapEntry; mapEntry.m_wszSimpleName = wszSimpleName; if (isNativeImage) { mapEntry.m_wszNIFileName = wszFileName; mapEntry.m_wszILFileName = pExistingEntry == nullptr ? nullptr : pExistingEntry->m_wszILFileName; } else { mapEntry.m_wszILFileName = wszFileName; mapEntry.m_wszNIFileName = pExistingEntry == nullptr ? nullptr : pExistingEntry->m_wszNIFileName; } m_pTrustedPlatformAssemblyMap->AddOrReplace(mapEntry); } // // Parse PlatformResourceRoots // sPlatformResourceRoots.Normalize(); for (SString::Iterator i = sPlatformResourceRoots.Begin(); i != sPlatformResourceRoots.End(); ) { SString pathName; HRESULT pathResult = S_OK; IF_FAIL_GO(pathResult = GetNextPath(sPlatformResourceRoots, i, pathName)); if (pathResult == S_FALSE) { break; } if (Path::IsRelative(pathName)) { GO_WITH_HRESULT(E_INVALIDARG); } m_platformResourceRoots.Append(pathName); } // // Parse AppPaths // sAppPaths.Normalize(); for (SString::Iterator i = sAppPaths.Begin(); i != sAppPaths.End(); ) { SString pathName; HRESULT pathResult = S_OK; IF_FAIL_GO(pathResult = GetNextPath(sAppPaths, i, pathName)); if (pathResult == S_FALSE) { break; } if (Path::IsRelative(pathName)) { GO_WITH_HRESULT(E_INVALIDARG); } m_appPaths.Append(pathName); } Exit: return hr; } HRESULT ApplicationContext::GetAssemblyIdentity(LPCSTR szTextualIdentity, AssemblyIdentityUTF8 **ppAssemblyIdentity) { HRESULT hr = S_OK; _ASSERTE(szTextualIdentity != NULL); _ASSERTE(ppAssemblyIdentity != NULL); CRITSEC_Holder contextLock(GetCriticalSectionCookie()); AssemblyIdentityUTF8 *pAssemblyIdentity = m_assemblyIdentityCache.Lookup(szTextualIdentity); if (pAssemblyIdentity == NULL) { NewHolder<AssemblyIdentityUTF8> pNewAssemblyIdentity; SString sTextualIdentity; SAFE_NEW(pNewAssemblyIdentity, AssemblyIdentityUTF8); sTextualIdentity.SetUTF8(szTextualIdentity); IF_FAIL_GO(TextualIdentityParser::Parse(sTextualIdentity, pNewAssemblyIdentity)); IF_FAIL_GO(m_assemblyIdentityCache.Add(szTextualIdentity, pNewAssemblyIdentity)); pNewAssemblyIdentity->PopulateUTF8Fields(); pAssemblyIdentity = pNewAssemblyIdentity.Extract(); } *ppAssemblyIdentity = pAssemblyIdentity; Exit: return hr; } bool ApplicationContext::IsTpaListProvided() { return m_pTrustedPlatformAssemblyMap != nullptr; } };
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // ============================================================ // // ApplicationContext.cpp // // // Implements the ApplicationContext class // // ============================================================ #include "applicationcontext.hpp" #include "stringarraylist.h" #include "loadcontext.hpp" #include "failurecache.hpp" #include "assemblyidentitycache.hpp" #include "utils.hpp" #include "ex.h" #include "clr/fs/path.h" using namespace clr::fs; namespace BINDER_SPACE { ApplicationContext::ApplicationContext() { m_pExecutionContext = NULL; m_pFailureCache = NULL; m_contextCS = NULL; m_pTrustedPlatformAssemblyMap = nullptr; } ApplicationContext::~ApplicationContext() { SAFE_DELETE(m_pExecutionContext); SAFE_DELETE(m_pFailureCache); if (m_contextCS != NULL) { ClrDeleteCriticalSection(m_contextCS); } if (m_pTrustedPlatformAssemblyMap != nullptr) { delete m_pTrustedPlatformAssemblyMap; } } HRESULT ApplicationContext::Init() { HRESULT hr = S_OK; NewHolder<ExecutionContext> pExecutionContext; FailureCache *pFailureCache = NULL; // Allocate context objects SAFE_NEW(pExecutionContext, ExecutionContext); SAFE_NEW(pFailureCache, FailureCache); m_contextCS = ClrCreateCriticalSection( CrstFusionAppCtx, CRST_REENTRANCY); if (!m_contextCS) { SAFE_DELETE(pFailureCache); hr = E_OUTOFMEMORY; } else { m_pExecutionContext = pExecutionContext.Extract(); m_pFailureCache = pFailureCache; } Exit: return hr; } HRESULT ApplicationContext::SetupBindingPaths(SString &sTrustedPlatformAssemblies, SString &sPlatformResourceRoots, SString &sAppPaths, BOOL fAcquireLock) { HRESULT hr = S_OK; CRITSEC_Holder contextLock(fAcquireLock ? GetCriticalSectionCookie() : NULL); if (m_pTrustedPlatformAssemblyMap != nullptr) { GO_WITH_HRESULT(S_OK); } // // Parse TrustedPlatformAssemblies // m_pTrustedPlatformAssemblyMap = new SimpleNameToFileNameMap(); sTrustedPlatformAssemblies.Normalize(); for (SString::Iterator i = sTrustedPlatformAssemblies.Begin(); i != sTrustedPlatformAssemblies.End(); ) { SString fileName; SString simpleName; bool isNativeImage = false; HRESULT pathResult = S_OK; IF_FAIL_GO(pathResult = GetNextTPAPath(sTrustedPlatformAssemblies, i, /*dllOnly*/ false, fileName, simpleName, isNativeImage)); if (pathResult == S_FALSE) { break; } const SimpleNameToFileNameMapEntry *pExistingEntry = m_pTrustedPlatformAssemblyMap->LookupPtr(simpleName.GetUnicode()); if (pExistingEntry != nullptr) { // // We want to store only the first entry matching a simple name we encounter. // The exception is if we first store an IL reference and later in the string // we encounter a native image. Since we don't touch IL in the presence of // native images, we replace the IL entry with the NI. 
// if ((pExistingEntry->m_wszILFileName != nullptr && !isNativeImage) || (pExistingEntry->m_wszNIFileName != nullptr && isNativeImage)) { continue; } } LPWSTR wszSimpleName = nullptr; if (pExistingEntry == nullptr) { wszSimpleName = new WCHAR[simpleName.GetCount() + 1]; if (wszSimpleName == nullptr) { GO_WITH_HRESULT(E_OUTOFMEMORY); } wcscpy_s(wszSimpleName, simpleName.GetCount() + 1, simpleName.GetUnicode()); } else { wszSimpleName = pExistingEntry->m_wszSimpleName; } LPWSTR wszFileName = new WCHAR[fileName.GetCount() + 1]; if (wszFileName == nullptr) { GO_WITH_HRESULT(E_OUTOFMEMORY); } wcscpy_s(wszFileName, fileName.GetCount() + 1, fileName.GetUnicode()); SimpleNameToFileNameMapEntry mapEntry; mapEntry.m_wszSimpleName = wszSimpleName; if (isNativeImage) { mapEntry.m_wszNIFileName = wszFileName; mapEntry.m_wszILFileName = pExistingEntry == nullptr ? nullptr : pExistingEntry->m_wszILFileName; } else { mapEntry.m_wszILFileName = wszFileName; mapEntry.m_wszNIFileName = pExistingEntry == nullptr ? nullptr : pExistingEntry->m_wszNIFileName; } m_pTrustedPlatformAssemblyMap->AddOrReplace(mapEntry); } // // Parse PlatformResourceRoots // sPlatformResourceRoots.Normalize(); for (SString::Iterator i = sPlatformResourceRoots.Begin(); i != sPlatformResourceRoots.End(); ) { SString pathName; HRESULT pathResult = S_OK; IF_FAIL_GO(pathResult = GetNextPath(sPlatformResourceRoots, i, pathName)); if (pathResult == S_FALSE) { break; } if (Path::IsRelative(pathName)) { GO_WITH_HRESULT(E_INVALIDARG); } m_platformResourceRoots.Append(pathName); } // // Parse AppPaths // sAppPaths.Normalize(); for (SString::Iterator i = sAppPaths.Begin(); i != sAppPaths.End(); ) { SString pathName; HRESULT pathResult = S_OK; IF_FAIL_GO(pathResult = GetNextPath(sAppPaths, i, pathName)); if (pathResult == S_FALSE) { break; } if (Path::IsRelative(pathName)) { GO_WITH_HRESULT(E_INVALIDARG); } m_appPaths.Append(pathName); } Exit: return hr; } HRESULT ApplicationContext::GetAssemblyIdentity(LPCSTR szTextualIdentity, AssemblyIdentityUTF8 **ppAssemblyIdentity) { HRESULT hr = S_OK; _ASSERTE(szTextualIdentity != NULL); _ASSERTE(ppAssemblyIdentity != NULL); CRITSEC_Holder contextLock(GetCriticalSectionCookie()); AssemblyIdentityUTF8 *pAssemblyIdentity = m_assemblyIdentityCache.Lookup(szTextualIdentity); if (pAssemblyIdentity == NULL) { NewHolder<AssemblyIdentityUTF8> pNewAssemblyIdentity; SString sTextualIdentity; SAFE_NEW(pNewAssemblyIdentity, AssemblyIdentityUTF8); sTextualIdentity.SetUTF8(szTextualIdentity); IF_FAIL_GO(TextualIdentityParser::Parse(sTextualIdentity, pNewAssemblyIdentity)); IF_FAIL_GO(m_assemblyIdentityCache.Add(szTextualIdentity, pNewAssemblyIdentity)); pNewAssemblyIdentity->PopulateUTF8Fields(); pAssemblyIdentity = pNewAssemblyIdentity.Extract(); } *ppAssemblyIdentity = pAssemblyIdentity; Exit: return hr; } bool ApplicationContext::IsTpaListProvided() { return m_pTrustedPlatformAssemblyMap != nullptr; } };
-1
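The SetupBindingPaths code in the row above builds a simple-name to file-name map for the trusted platform assemblies: the first IL file and the first native image seen for a simple name each claim their slot, and later duplicates are ignored. Below is a hedged, simplified illustration of that precedence rule using std:: containers; these are deliberately not the binder's real types (it uses SimpleNameToFileNameMap, SString, and AddOrReplace).

```cpp
// Hedged, simplified illustration of the precedence rule above; std::map and
// std::wstring stand in for the binder's actual data structures.
#include <map>
#include <string>

struct TpaEntry
{
    std::wstring ilFile;  // empty until an IL file is seen for this simple name
    std::wstring niFile;  // empty until a native image is seen
};

void AddTpaFile(std::map<std::wstring, TpaEntry>& tpa,
                const std::wstring& simpleName,
                const std::wstring& fileName,
                bool isNativeImage)
{
    TpaEntry& entry = tpa[simpleName];            // creates an empty entry on first use
    std::wstring& slot = isNativeImage ? entry.niFile : entry.ilFile;
    if (slot.empty())                             // first file for this slot wins
        slot = fileName;
}
```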
dotnet/runtime
66,474
Update repo CMake configuration to target /W4 by default
Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
AaronRobinsonMSFT
2022-03-10T20:20:31Z
2022-03-11T04:59:21Z
ba5207b0f4054fe8963fc3e87c2bd32907516961
5ce2b9f860f7a84c3059650bb67817d59d8f4953
Update repo CMake configuration to target /W4 by default. Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
./eng/common/cross/toolchain.cmake
set(CROSS_ROOTFS $ENV{ROOTFS_DIR}) set(TARGET_ARCH_NAME $ENV{TARGET_BUILD_ARCH}) if(EXISTS ${CROSS_ROOTFS}/bin/freebsd-version) set(CMAKE_SYSTEM_NAME FreeBSD) set(FREEBSD 1) elseif(EXISTS ${CROSS_ROOTFS}/usr/platform/i86pc) set(CMAKE_SYSTEM_NAME SunOS) set(ILLUMOS 1) else() set(CMAKE_SYSTEM_NAME Linux) set(LINUX 1) endif() set(CMAKE_SYSTEM_VERSION 1) if(EXISTS ${CROSS_ROOTFS}/etc/tizen-release) set(TIZEN 1) elseif(EXISTS ${CROSS_ROOTFS}/android_platform) set(ANDROID 1) endif() if(TARGET_ARCH_NAME STREQUAL "armel") set(CMAKE_SYSTEM_PROCESSOR armv7l) set(TOOLCHAIN "arm-linux-gnueabi") if(TIZEN) set(TIZEN_TOOLCHAIN "armv7l-tizen-linux-gnueabi/9.2.0") endif() elseif(TARGET_ARCH_NAME STREQUAL "arm") set(CMAKE_SYSTEM_PROCESSOR armv7l) if(EXISTS ${CROSS_ROOTFS}/usr/lib/gcc/armv7-alpine-linux-musleabihf) set(TOOLCHAIN "armv7-alpine-linux-musleabihf") elseif(EXISTS ${CROSS_ROOTFS}/usr/lib/gcc/armv6-alpine-linux-musleabihf) set(TOOLCHAIN "armv6-alpine-linux-musleabihf") else() set(TOOLCHAIN "arm-linux-gnueabihf") endif() if(TIZEN) set(TIZEN_TOOLCHAIN "armv7hl-tizen-linux-gnueabihf/9.2.0") endif() elseif(TARGET_ARCH_NAME STREQUAL "armv6") set(CMAKE_SYSTEM_PROCESSOR armv6l) if(EXISTS ${CROSS_ROOTFS}/usr/lib/gcc/armv6-alpine-linux-musleabihf) set(TOOLCHAIN "armv6-alpine-linux-musleabihf") else() set(TOOLCHAIN "arm-linux-gnueabihf") endif() elseif(TARGET_ARCH_NAME STREQUAL "arm64") set(CMAKE_SYSTEM_PROCESSOR aarch64) if(EXISTS ${CROSS_ROOTFS}/usr/lib/gcc/aarch64-alpine-linux-musl) set(TOOLCHAIN "aarch64-alpine-linux-musl") else() set(TOOLCHAIN "aarch64-linux-gnu") endif() if(TIZEN) set(TIZEN_TOOLCHAIN "aarch64-tizen-linux-gnu/9.2.0") endif() elseif(TARGET_ARCH_NAME STREQUAL "ppc64le") set(CMAKE_SYSTEM_PROCESSOR ppc64le) set(TOOLCHAIN "powerpc64le-linux-gnu") elseif(TARGET_ARCH_NAME STREQUAL "s390x") set(CMAKE_SYSTEM_PROCESSOR s390x) set(TOOLCHAIN "s390x-linux-gnu") elseif(TARGET_ARCH_NAME STREQUAL "x86") set(CMAKE_SYSTEM_PROCESSOR i686) set(TOOLCHAIN "i686-linux-gnu") elseif (FREEBSD) set(CMAKE_SYSTEM_PROCESSOR "x86_64") set(triple "x86_64-unknown-freebsd12") elseif (ILLUMOS) set(CMAKE_SYSTEM_PROCESSOR "x86_64") set(TOOLCHAIN "x86_64-illumos") else() message(FATAL_ERROR "Arch is ${TARGET_ARCH_NAME}. 
Only armel, arm, armv6, arm64, ppc64le, s390x and x86 are supported!") endif() if(DEFINED ENV{TOOLCHAIN}) set(TOOLCHAIN $ENV{TOOLCHAIN}) endif() # Specify include paths if(TIZEN) if(TARGET_ARCH_NAME STREQUAL "arm") include_directories(SYSTEM ${CROSS_ROOTFS}/usr/lib/gcc/${TIZEN_TOOLCHAIN}/include/c++/) include_directories(SYSTEM ${CROSS_ROOTFS}/usr/lib/gcc/${TIZEN_TOOLCHAIN}/include/c++/armv7hl-tizen-linux-gnueabihf) endif() if(TARGET_ARCH_NAME STREQUAL "armel") include_directories(SYSTEM ${CROSS_ROOTFS}/usr/lib/gcc/${TIZEN_TOOLCHAIN}/include/c++/) include_directories(SYSTEM ${CROSS_ROOTFS}/usr/lib/gcc/${TIZEN_TOOLCHAIN}/include/c++/armv7l-tizen-linux-gnueabi) endif() if(TARGET_ARCH_NAME STREQUAL "arm64") include_directories(SYSTEM ${CROSS_ROOTFS}/usr/lib64/gcc/${TIZEN_TOOLCHAIN}/include/c++/) include_directories(SYSTEM ${CROSS_ROOTFS}/usr/lib64/gcc/${TIZEN_TOOLCHAIN}/include/c++/aarch64-tizen-linux-gnu) endif() endif() if(ANDROID) if(TARGET_ARCH_NAME STREQUAL "arm") set(ANDROID_ABI armeabi-v7a) elseif(TARGET_ARCH_NAME STREQUAL "arm64") set(ANDROID_ABI arm64-v8a) endif() # extract platform number required by the NDK's toolchain file(READ "${CROSS_ROOTFS}/android_platform" RID_FILE_CONTENTS) string(REPLACE "RID=" "" ANDROID_RID "${RID_FILE_CONTENTS}") string(REGEX REPLACE ".*\\.([0-9]+)-.*" "\\1" ANDROID_PLATFORM "${ANDROID_RID}") set(ANDROID_TOOLCHAIN clang) set(FEATURE_EVENT_TRACE 0) # disable event trace as there is no lttng-ust package in termux repository set(CMAKE_SYSTEM_LIBRARY_PATH "${CROSS_ROOTFS}/usr/lib") set(CMAKE_SYSTEM_INCLUDE_PATH "${CROSS_ROOTFS}/usr/include") # include official NDK toolchain script include(${CROSS_ROOTFS}/../build/cmake/android.toolchain.cmake) elseif(FREEBSD) # we cross-compile by instructing clang set(CMAKE_C_COMPILER_TARGET ${triple}) set(CMAKE_CXX_COMPILER_TARGET ${triple}) set(CMAKE_ASM_COMPILER_TARGET ${triple}) set(CMAKE_SYSROOT "${CROSS_ROOTFS}") set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=lld") set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fuse-ld=lld") set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -fuse-ld=lld") elseif(ILLUMOS) set(CMAKE_SYSROOT "${CROSS_ROOTFS}") include_directories(SYSTEM ${CROSS_ROOTFS}/include) set(TOOLSET_PREFIX ${TOOLCHAIN}-) function(locate_toolchain_exec exec var) string(TOUPPER ${exec} EXEC_UPPERCASE) if(NOT "$ENV{CLR_${EXEC_UPPERCASE}}" STREQUAL "") set(${var} "$ENV{CLR_${EXEC_UPPERCASE}}" PARENT_SCOPE) return() endif() find_program(EXEC_LOCATION_${exec} NAMES "${TOOLSET_PREFIX}${exec}${CLR_CMAKE_COMPILER_FILE_NAME_VERSION}" "${TOOLSET_PREFIX}${exec}") if (EXEC_LOCATION_${exec} STREQUAL "EXEC_LOCATION_${exec}-NOTFOUND") message(FATAL_ERROR "Unable to find toolchain executable. 
Name: ${exec}, Prefix: ${TOOLSET_PREFIX}.") endif() set(${var} ${EXEC_LOCATION_${exec}} PARENT_SCOPE) endfunction() set(CMAKE_SYSTEM_PREFIX_PATH "${CROSS_ROOTFS}") locate_toolchain_exec(gcc CMAKE_C_COMPILER) locate_toolchain_exec(g++ CMAKE_CXX_COMPILER) set(CMAKE_C_STANDARD_LIBRARIES "${CMAKE_C_STANDARD_LIBRARIES} -lssp") set(CMAKE_CXX_STANDARD_LIBRARIES "${CMAKE_CXX_STANDARD_LIBRARIES} -lssp") else() set(CMAKE_SYSROOT "${CROSS_ROOTFS}") set(CMAKE_C_COMPILER_EXTERNAL_TOOLCHAIN "${CROSS_ROOTFS}/usr") set(CMAKE_CXX_COMPILER_EXTERNAL_TOOLCHAIN "${CROSS_ROOTFS}/usr") set(CMAKE_ASM_COMPILER_EXTERNAL_TOOLCHAIN "${CROSS_ROOTFS}/usr") endif() # Specify link flags function(add_toolchain_linker_flag Flag) set(Config "${ARGV1}") set(CONFIG_SUFFIX "") if (NOT Config STREQUAL "") set(CONFIG_SUFFIX "_${Config}") endif() set("CMAKE_EXE_LINKER_FLAGS${CONFIG_SUFFIX}_INIT" "${CMAKE_EXE_LINKER_FLAGS${CONFIG_SUFFIX}_INIT} ${Flag}" PARENT_SCOPE) set("CMAKE_SHARED_LINKER_FLAGS${CONFIG_SUFFIX}_INIT" "${CMAKE_SHARED_LINKER_FLAGS${CONFIG_SUFFIX}_INIT} ${Flag}" PARENT_SCOPE) endfunction() if(LINUX) add_toolchain_linker_flag("-Wl,--rpath-link=${CROSS_ROOTFS}/lib/${TOOLCHAIN}") add_toolchain_linker_flag("-Wl,--rpath-link=${CROSS_ROOTFS}/usr/lib/${TOOLCHAIN}") endif() if(TARGET_ARCH_NAME MATCHES "^(arm|armel)$") if(TIZEN) add_toolchain_linker_flag("-B${CROSS_ROOTFS}/usr/lib/gcc/${TIZEN_TOOLCHAIN}") add_toolchain_linker_flag("-L${CROSS_ROOTFS}/lib") add_toolchain_linker_flag("-L${CROSS_ROOTFS}/usr/lib") add_toolchain_linker_flag("-L${CROSS_ROOTFS}/usr/lib/gcc/${TIZEN_TOOLCHAIN}") endif() elseif(TARGET_ARCH_NAME STREQUAL "arm64") if(TIZEN) add_toolchain_linker_flag("-B${CROSS_ROOTFS}/usr/lib64/gcc/${TIZEN_TOOLCHAIN}") add_toolchain_linker_flag("-L${CROSS_ROOTFS}/lib64") add_toolchain_linker_flag("-L${CROSS_ROOTFS}/usr/lib64") add_toolchain_linker_flag("-L${CROSS_ROOTFS}/usr/lib64/gcc/${TIZEN_TOOLCHAIN}") add_toolchain_linker_flag("-Wl,--rpath-link=${CROSS_ROOTFS}/lib64") add_toolchain_linker_flag("-Wl,--rpath-link=${CROSS_ROOTFS}/usr/lib64") add_toolchain_linker_flag("-Wl,--rpath-link=${CROSS_ROOTFS}/usr/lib64/gcc/${TIZEN_TOOLCHAIN}") endif() elseif(TARGET_ARCH_NAME STREQUAL "x86") add_toolchain_linker_flag(-m32) elseif(ILLUMOS) add_toolchain_linker_flag("-L${CROSS_ROOTFS}/lib/amd64") add_toolchain_linker_flag("-L${CROSS_ROOTFS}/usr/amd64/lib") endif() # Specify compile options if((TARGET_ARCH_NAME MATCHES "^(arm|armv6|armel|arm64|ppc64le|s390x)$" AND NOT ANDROID) OR ILLUMOS) set(CMAKE_C_COMPILER_TARGET ${TOOLCHAIN}) set(CMAKE_CXX_COMPILER_TARGET ${TOOLCHAIN}) set(CMAKE_ASM_COMPILER_TARGET ${TOOLCHAIN}) endif() if(TARGET_ARCH_NAME MATCHES "^(arm|armel)$") add_compile_options(-mthumb) if (NOT DEFINED CLR_ARM_FPU_TYPE) set (CLR_ARM_FPU_TYPE vfpv3) endif (NOT DEFINED CLR_ARM_FPU_TYPE) add_compile_options (-mfpu=${CLR_ARM_FPU_TYPE}) if (NOT DEFINED CLR_ARM_FPU_CAPABILITY) set (CLR_ARM_FPU_CAPABILITY 0x7) endif (NOT DEFINED CLR_ARM_FPU_CAPABILITY) add_definitions (-DCLR_ARM_FPU_CAPABILITY=${CLR_ARM_FPU_CAPABILITY}) if(TARGET_ARCH_NAME STREQUAL "armel") add_compile_options(-mfloat-abi=softfp) endif() elseif(TARGET_ARCH_NAME STREQUAL "x86") add_compile_options(-m32) add_compile_options(-Wno-error=unused-command-line-argument) endif() if(TIZEN) if(TARGET_ARCH_NAME MATCHES "^(arm|armel|arm64)$") add_compile_options(-Wno-deprecated-declarations) # compile-time option add_compile_options(-D__extern_always_inline=inline) # compile-time option endif() endif() # Set LLDB include and library paths for builds that need lldb. 
if(TARGET_ARCH_NAME MATCHES "^(arm|armel|x86)$") if(TARGET_ARCH_NAME STREQUAL "x86") set(LLVM_CROSS_DIR "$ENV{LLVM_CROSS_HOME}") else() # arm/armel case set(LLVM_CROSS_DIR "$ENV{LLVM_ARM_HOME}") endif() if(LLVM_CROSS_DIR) set(WITH_LLDB_LIBS "${LLVM_CROSS_DIR}/lib/" CACHE STRING "") set(WITH_LLDB_INCLUDES "${LLVM_CROSS_DIR}/include" CACHE STRING "") set(LLDB_H "${WITH_LLDB_INCLUDES}" CACHE STRING "") set(LLDB "${LLVM_CROSS_DIR}/lib/liblldb.so" CACHE STRING "") else() if(TARGET_ARCH_NAME STREQUAL "x86") set(WITH_LLDB_LIBS "${CROSS_ROOTFS}/usr/lib/i386-linux-gnu" CACHE STRING "") set(CHECK_LLVM_DIR "${CROSS_ROOTFS}/usr/lib/llvm-3.8/include") if(EXISTS "${CHECK_LLVM_DIR}" AND IS_DIRECTORY "${CHECK_LLVM_DIR}") set(WITH_LLDB_INCLUDES "${CHECK_LLVM_DIR}") else() set(WITH_LLDB_INCLUDES "${CROSS_ROOTFS}/usr/lib/llvm-3.6/include") endif() else() # arm/armel case set(WITH_LLDB_LIBS "${CROSS_ROOTFS}/usr/lib/${TOOLCHAIN}" CACHE STRING "") set(WITH_LLDB_INCLUDES "${CROSS_ROOTFS}/usr/lib/llvm-3.6/include" CACHE STRING "") endif() endif() endif() set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)
set(CROSS_ROOTFS $ENV{ROOTFS_DIR}) set(TARGET_ARCH_NAME $ENV{TARGET_BUILD_ARCH}) if(EXISTS ${CROSS_ROOTFS}/bin/freebsd-version) set(CMAKE_SYSTEM_NAME FreeBSD) set(FREEBSD 1) elseif(EXISTS ${CROSS_ROOTFS}/usr/platform/i86pc) set(CMAKE_SYSTEM_NAME SunOS) set(ILLUMOS 1) else() set(CMAKE_SYSTEM_NAME Linux) set(LINUX 1) endif() set(CMAKE_SYSTEM_VERSION 1) if(EXISTS ${CROSS_ROOTFS}/etc/tizen-release) set(TIZEN 1) elseif(EXISTS ${CROSS_ROOTFS}/android_platform) set(ANDROID 1) endif() if(TARGET_ARCH_NAME STREQUAL "armel") set(CMAKE_SYSTEM_PROCESSOR armv7l) set(TOOLCHAIN "arm-linux-gnueabi") if(TIZEN) set(TIZEN_TOOLCHAIN "armv7l-tizen-linux-gnueabi/9.2.0") endif() elseif(TARGET_ARCH_NAME STREQUAL "arm") set(CMAKE_SYSTEM_PROCESSOR armv7l) if(EXISTS ${CROSS_ROOTFS}/usr/lib/gcc/armv7-alpine-linux-musleabihf) set(TOOLCHAIN "armv7-alpine-linux-musleabihf") elseif(EXISTS ${CROSS_ROOTFS}/usr/lib/gcc/armv6-alpine-linux-musleabihf) set(TOOLCHAIN "armv6-alpine-linux-musleabihf") else() set(TOOLCHAIN "arm-linux-gnueabihf") endif() if(TIZEN) set(TIZEN_TOOLCHAIN "armv7hl-tizen-linux-gnueabihf/9.2.0") endif() elseif(TARGET_ARCH_NAME STREQUAL "armv6") set(CMAKE_SYSTEM_PROCESSOR armv6l) if(EXISTS ${CROSS_ROOTFS}/usr/lib/gcc/armv6-alpine-linux-musleabihf) set(TOOLCHAIN "armv6-alpine-linux-musleabihf") else() set(TOOLCHAIN "arm-linux-gnueabihf") endif() elseif(TARGET_ARCH_NAME STREQUAL "arm64") set(CMAKE_SYSTEM_PROCESSOR aarch64) if(EXISTS ${CROSS_ROOTFS}/usr/lib/gcc/aarch64-alpine-linux-musl) set(TOOLCHAIN "aarch64-alpine-linux-musl") else() set(TOOLCHAIN "aarch64-linux-gnu") endif() if(TIZEN) set(TIZEN_TOOLCHAIN "aarch64-tizen-linux-gnu/9.2.0") endif() elseif(TARGET_ARCH_NAME STREQUAL "ppc64le") set(CMAKE_SYSTEM_PROCESSOR ppc64le) set(TOOLCHAIN "powerpc64le-linux-gnu") elseif(TARGET_ARCH_NAME STREQUAL "s390x") set(CMAKE_SYSTEM_PROCESSOR s390x) set(TOOLCHAIN "s390x-linux-gnu") elseif(TARGET_ARCH_NAME STREQUAL "x86") set(CMAKE_SYSTEM_PROCESSOR i686) set(TOOLCHAIN "i686-linux-gnu") elseif (FREEBSD) set(CMAKE_SYSTEM_PROCESSOR "x86_64") set(triple "x86_64-unknown-freebsd12") elseif (ILLUMOS) set(CMAKE_SYSTEM_PROCESSOR "x86_64") set(TOOLCHAIN "x86_64-illumos") else() message(FATAL_ERROR "Arch is ${TARGET_ARCH_NAME}. 
Only armel, arm, armv6, arm64, ppc64le, s390x and x86 are supported!") endif() if(DEFINED ENV{TOOLCHAIN}) set(TOOLCHAIN $ENV{TOOLCHAIN}) endif() # Specify include paths if(TIZEN) if(TARGET_ARCH_NAME STREQUAL "arm") include_directories(SYSTEM ${CROSS_ROOTFS}/usr/lib/gcc/${TIZEN_TOOLCHAIN}/include/c++/) include_directories(SYSTEM ${CROSS_ROOTFS}/usr/lib/gcc/${TIZEN_TOOLCHAIN}/include/c++/armv7hl-tizen-linux-gnueabihf) endif() if(TARGET_ARCH_NAME STREQUAL "armel") include_directories(SYSTEM ${CROSS_ROOTFS}/usr/lib/gcc/${TIZEN_TOOLCHAIN}/include/c++/) include_directories(SYSTEM ${CROSS_ROOTFS}/usr/lib/gcc/${TIZEN_TOOLCHAIN}/include/c++/armv7l-tizen-linux-gnueabi) endif() if(TARGET_ARCH_NAME STREQUAL "arm64") include_directories(SYSTEM ${CROSS_ROOTFS}/usr/lib64/gcc/${TIZEN_TOOLCHAIN}/include/c++/) include_directories(SYSTEM ${CROSS_ROOTFS}/usr/lib64/gcc/${TIZEN_TOOLCHAIN}/include/c++/aarch64-tizen-linux-gnu) endif() endif() if(ANDROID) if(TARGET_ARCH_NAME STREQUAL "arm") set(ANDROID_ABI armeabi-v7a) elseif(TARGET_ARCH_NAME STREQUAL "arm64") set(ANDROID_ABI arm64-v8a) endif() # extract platform number required by the NDK's toolchain file(READ "${CROSS_ROOTFS}/android_platform" RID_FILE_CONTENTS) string(REPLACE "RID=" "" ANDROID_RID "${RID_FILE_CONTENTS}") string(REGEX REPLACE ".*\\.([0-9]+)-.*" "\\1" ANDROID_PLATFORM "${ANDROID_RID}") set(ANDROID_TOOLCHAIN clang) set(FEATURE_EVENT_TRACE 0) # disable event trace as there is no lttng-ust package in termux repository set(CMAKE_SYSTEM_LIBRARY_PATH "${CROSS_ROOTFS}/usr/lib") set(CMAKE_SYSTEM_INCLUDE_PATH "${CROSS_ROOTFS}/usr/include") # include official NDK toolchain script include(${CROSS_ROOTFS}/../build/cmake/android.toolchain.cmake) elseif(FREEBSD) # we cross-compile by instructing clang set(CMAKE_C_COMPILER_TARGET ${triple}) set(CMAKE_CXX_COMPILER_TARGET ${triple}) set(CMAKE_ASM_COMPILER_TARGET ${triple}) set(CMAKE_SYSROOT "${CROSS_ROOTFS}") set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=lld") set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fuse-ld=lld") set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -fuse-ld=lld") elseif(ILLUMOS) set(CMAKE_SYSROOT "${CROSS_ROOTFS}") include_directories(SYSTEM ${CROSS_ROOTFS}/include) set(TOOLSET_PREFIX ${TOOLCHAIN}-) function(locate_toolchain_exec exec var) string(TOUPPER ${exec} EXEC_UPPERCASE) if(NOT "$ENV{CLR_${EXEC_UPPERCASE}}" STREQUAL "") set(${var} "$ENV{CLR_${EXEC_UPPERCASE}}" PARENT_SCOPE) return() endif() find_program(EXEC_LOCATION_${exec} NAMES "${TOOLSET_PREFIX}${exec}${CLR_CMAKE_COMPILER_FILE_NAME_VERSION}" "${TOOLSET_PREFIX}${exec}") if (EXEC_LOCATION_${exec} STREQUAL "EXEC_LOCATION_${exec}-NOTFOUND") message(FATAL_ERROR "Unable to find toolchain executable. 
Name: ${exec}, Prefix: ${TOOLSET_PREFIX}.") endif() set(${var} ${EXEC_LOCATION_${exec}} PARENT_SCOPE) endfunction() set(CMAKE_SYSTEM_PREFIX_PATH "${CROSS_ROOTFS}") locate_toolchain_exec(gcc CMAKE_C_COMPILER) locate_toolchain_exec(g++ CMAKE_CXX_COMPILER) set(CMAKE_C_STANDARD_LIBRARIES "${CMAKE_C_STANDARD_LIBRARIES} -lssp") set(CMAKE_CXX_STANDARD_LIBRARIES "${CMAKE_CXX_STANDARD_LIBRARIES} -lssp") else() set(CMAKE_SYSROOT "${CROSS_ROOTFS}") set(CMAKE_C_COMPILER_EXTERNAL_TOOLCHAIN "${CROSS_ROOTFS}/usr") set(CMAKE_CXX_COMPILER_EXTERNAL_TOOLCHAIN "${CROSS_ROOTFS}/usr") set(CMAKE_ASM_COMPILER_EXTERNAL_TOOLCHAIN "${CROSS_ROOTFS}/usr") endif() # Specify link flags function(add_toolchain_linker_flag Flag) set(Config "${ARGV1}") set(CONFIG_SUFFIX "") if (NOT Config STREQUAL "") set(CONFIG_SUFFIX "_${Config}") endif() set("CMAKE_EXE_LINKER_FLAGS${CONFIG_SUFFIX}_INIT" "${CMAKE_EXE_LINKER_FLAGS${CONFIG_SUFFIX}_INIT} ${Flag}" PARENT_SCOPE) set("CMAKE_SHARED_LINKER_FLAGS${CONFIG_SUFFIX}_INIT" "${CMAKE_SHARED_LINKER_FLAGS${CONFIG_SUFFIX}_INIT} ${Flag}" PARENT_SCOPE) endfunction() if(LINUX) add_toolchain_linker_flag("-Wl,--rpath-link=${CROSS_ROOTFS}/lib/${TOOLCHAIN}") add_toolchain_linker_flag("-Wl,--rpath-link=${CROSS_ROOTFS}/usr/lib/${TOOLCHAIN}") endif() if(TARGET_ARCH_NAME MATCHES "^(arm|armel)$") if(TIZEN) add_toolchain_linker_flag("-B${CROSS_ROOTFS}/usr/lib/gcc/${TIZEN_TOOLCHAIN}") add_toolchain_linker_flag("-L${CROSS_ROOTFS}/lib") add_toolchain_linker_flag("-L${CROSS_ROOTFS}/usr/lib") add_toolchain_linker_flag("-L${CROSS_ROOTFS}/usr/lib/gcc/${TIZEN_TOOLCHAIN}") endif() elseif(TARGET_ARCH_NAME STREQUAL "arm64") if(TIZEN) add_toolchain_linker_flag("-B${CROSS_ROOTFS}/usr/lib64/gcc/${TIZEN_TOOLCHAIN}") add_toolchain_linker_flag("-L${CROSS_ROOTFS}/lib64") add_toolchain_linker_flag("-L${CROSS_ROOTFS}/usr/lib64") add_toolchain_linker_flag("-L${CROSS_ROOTFS}/usr/lib64/gcc/${TIZEN_TOOLCHAIN}") add_toolchain_linker_flag("-Wl,--rpath-link=${CROSS_ROOTFS}/lib64") add_toolchain_linker_flag("-Wl,--rpath-link=${CROSS_ROOTFS}/usr/lib64") add_toolchain_linker_flag("-Wl,--rpath-link=${CROSS_ROOTFS}/usr/lib64/gcc/${TIZEN_TOOLCHAIN}") endif() elseif(TARGET_ARCH_NAME STREQUAL "x86") add_toolchain_linker_flag(-m32) elseif(ILLUMOS) add_toolchain_linker_flag("-L${CROSS_ROOTFS}/lib/amd64") add_toolchain_linker_flag("-L${CROSS_ROOTFS}/usr/amd64/lib") endif() # Specify compile options if((TARGET_ARCH_NAME MATCHES "^(arm|armv6|armel|arm64|ppc64le|s390x)$" AND NOT ANDROID) OR ILLUMOS) set(CMAKE_C_COMPILER_TARGET ${TOOLCHAIN}) set(CMAKE_CXX_COMPILER_TARGET ${TOOLCHAIN}) set(CMAKE_ASM_COMPILER_TARGET ${TOOLCHAIN}) endif() if(TARGET_ARCH_NAME MATCHES "^(arm|armel)$") add_compile_options(-mthumb) if (NOT DEFINED CLR_ARM_FPU_TYPE) set (CLR_ARM_FPU_TYPE vfpv3) endif (NOT DEFINED CLR_ARM_FPU_TYPE) add_compile_options (-mfpu=${CLR_ARM_FPU_TYPE}) if (NOT DEFINED CLR_ARM_FPU_CAPABILITY) set (CLR_ARM_FPU_CAPABILITY 0x7) endif (NOT DEFINED CLR_ARM_FPU_CAPABILITY) add_definitions (-DCLR_ARM_FPU_CAPABILITY=${CLR_ARM_FPU_CAPABILITY}) if(TARGET_ARCH_NAME STREQUAL "armel") add_compile_options(-mfloat-abi=softfp) endif() elseif(TARGET_ARCH_NAME STREQUAL "x86") add_compile_options(-m32) add_compile_options(-Wno-error=unused-command-line-argument) endif() if(TIZEN) if(TARGET_ARCH_NAME MATCHES "^(arm|armel|arm64)$") add_compile_options(-Wno-deprecated-declarations) # compile-time option add_compile_options(-D__extern_always_inline=inline) # compile-time option endif() endif() # Set LLDB include and library paths for builds that need lldb. 
if(TARGET_ARCH_NAME MATCHES "^(arm|armel|x86)$") if(TARGET_ARCH_NAME STREQUAL "x86") set(LLVM_CROSS_DIR "$ENV{LLVM_CROSS_HOME}") else() # arm/armel case set(LLVM_CROSS_DIR "$ENV{LLVM_ARM_HOME}") endif() if(LLVM_CROSS_DIR) set(WITH_LLDB_LIBS "${LLVM_CROSS_DIR}/lib/" CACHE STRING "") set(WITH_LLDB_INCLUDES "${LLVM_CROSS_DIR}/include" CACHE STRING "") set(LLDB_H "${WITH_LLDB_INCLUDES}" CACHE STRING "") set(LLDB "${LLVM_CROSS_DIR}/lib/liblldb.so" CACHE STRING "") else() if(TARGET_ARCH_NAME STREQUAL "x86") set(WITH_LLDB_LIBS "${CROSS_ROOTFS}/usr/lib/i386-linux-gnu" CACHE STRING "") set(CHECK_LLVM_DIR "${CROSS_ROOTFS}/usr/lib/llvm-3.8/include") if(EXISTS "${CHECK_LLVM_DIR}" AND IS_DIRECTORY "${CHECK_LLVM_DIR}") set(WITH_LLDB_INCLUDES "${CHECK_LLVM_DIR}") else() set(WITH_LLDB_INCLUDES "${CROSS_ROOTFS}/usr/lib/llvm-3.6/include") endif() else() # arm/armel case set(WITH_LLDB_LIBS "${CROSS_ROOTFS}/usr/lib/${TOOLCHAIN}" CACHE STRING "") set(WITH_LLDB_INCLUDES "${CROSS_ROOTFS}/usr/lib/llvm-3.6/include" CACHE STRING "") endif() endif() endif() set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)
-1
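The toolchain file in the row above takes its root filesystem and target architecture from the ROOTFS_DIR and TARGET_BUILD_ARCH environment variables. A hedged companion snippet is sketched below; it is not part of eng/common/cross/toolchain.cmake, and the rootfs path shown in the message is illustrative only.

```cmake
# Hedged companion sketch (not part of the repo file above): a wrapper or
# pre-include can fail fast when the environment the toolchain file expects is
# missing. The example path and architecture in the message are illustrative.
if(NOT DEFINED ENV{ROOTFS_DIR} OR NOT DEFINED ENV{TARGET_BUILD_ARCH})
  message(FATAL_ERROR
    "Set ROOTFS_DIR and TARGET_BUILD_ARCH before configuring, e.g.\n"
    "  ROOTFS_DIR=/crossrootfs/arm64 TARGET_BUILD_ARCH=arm64 \\\n"
    "  cmake -DCMAKE_TOOLCHAIN_FILE=eng/common/cross/toolchain.cmake <source-dir>")
endif()
```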
dotnet/runtime
66,474
Update repo CMake configuration to target /W4 by default
Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
AaronRobinsonMSFT
2022-03-10T20:20:31Z
2022-03-11T04:59:21Z
ba5207b0f4054fe8963fc3e87c2bd32907516961
5ce2b9f860f7a84c3059650bb67817d59d8f4953
Update repo CMake configuration to target /W4 by default. Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
./src/coreclr/tools/CMakeLists.txt
add_subdirectory(SOS) add_subdirectory(superpmi) if (CLR_CMAKE_TARGET_WIN32 AND NOT CLR_CMAKE_CROSS_ARCH) add_subdirectory(GenClrDebugResource) add_subdirectory(InjectResource) install(EXPORT dactabletools DESTINATION dactabletools COMPONENT crosscomponents) endif() if (CLR_CMAKE_TARGET_ARCH_AMD64 OR CLR_CMAKE_TARGET_ARCH_ARM64) add_subdirectory(StressLogAnalyzer) endif()
add_subdirectory(SOS) add_subdirectory(superpmi) if (CLR_CMAKE_TARGET_WIN32 AND NOT CLR_CMAKE_CROSS_ARCH) add_subdirectory(GenClrDebugResource) add_subdirectory(InjectResource) install(EXPORT dactabletools DESTINATION dactabletools COMPONENT crosscomponents) endif() if (CLR_CMAKE_TARGET_ARCH_AMD64 OR CLR_CMAKE_TARGET_ARCH_ARM64) add_subdirectory(StressLogAnalyzer) endif()
-1
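The PR description repeated in these rows says the repo's CMake configuration is being moved to /W4 by default, with the currently failing warnings disabled until they can be fixed. A minimal hedged sketch of that general pattern follows; it shows only the generic CMake idiom, not the actual dotnet/runtime configuration, which lives in shared eng/ files that are not part of this excerpt.

```cmake
# Minimal hedged sketch of "warnings at /W4 by default": generic CMake pattern
# only, not the repo's real configuration.
if(MSVC)
  add_compile_options(/W4)
  # individual warnings can be suppressed while they are being fixed, e.g.
  # add_compile_options(/wd4100)  # unreferenced formal parameter (illustrative)
else()
  add_compile_options(-Wall -Wextra)
endif()
```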
dotnet/runtime
66,474
Update repo CMake configuration to target /W4 by default
Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
AaronRobinsonMSFT
2022-03-10T20:20:31Z
2022-03-11T04:59:21Z
ba5207b0f4054fe8963fc3e87c2bd32907516961
5ce2b9f860f7a84c3059650bb67817d59d8f4953
Update repo CMake configuration to target /W4 by default. Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
./src/libraries/System.Private.Xml/tests/Xslt/TestFiles/TestData/xsltc/baseline/dft5.txt
Microsoft (R) XSLT Compiler version 2.0.61009 for Microsoft (R) Windows (R) 2005 Framework version 2.0.50727 Copyright (C) Microsoft Corporation 2007. All rights reserved. fatal error : Error saving assembly 'D:\OASys\Working\dft5.dll'. ---> Access is denied. (Exception from HRESULT: 0x80070005 (E_ACCESSDENIED))
Microsoft (R) XSLT Compiler version 2.0.61009 for Microsoft (R) Windows (R) 2005 Framework version 2.0.50727 Copyright (C) Microsoft Corporation 2007. All rights reserved. fatal error : Error saving assembly 'D:\OASys\Working\dft5.dll'. ---> Access is denied. (Exception from HRESULT: 0x80070005 (E_ACCESSDENIED))
-1
dotnet/runtime
66,474
Update repo CMake configuration to target /W4 by default
Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
AaronRobinsonMSFT
2022-03-10T20:20:31Z
2022-03-11T04:59:21Z
ba5207b0f4054fe8963fc3e87c2bd32907516961
5ce2b9f860f7a84c3059650bb67817d59d8f4953
Update repo CMake configuration to target /W4 by default. Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
./src/tests/Interop/PInvoke/CustomMarshalers/CustomMarshalersNative.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include <xplatform.h> extern "C" DLL_EXPORT void STDMETHODCALLTYPE Unsupported(void* ptr) { }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include <xplatform.h> extern "C" DLL_EXPORT void STDMETHODCALLTYPE Unsupported(void* ptr) { }
-1
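The interop stub in the record above exports `Unsupported(void* ptr)` with a parameter that is never used — exactly the kind of code that trips C4100 (unreferenced formal parameter) once MSVC is moved to /W4. The record leaves the test file as-is; the following is a hedged, hypothetical C++ sketch (not part of the test suite) of the two usual ways to keep such a stub warning-clean:

```cpp
// Hypothetical illustration of keeping an intentionally-empty MSVC export clean under /W4.

// Option 1: leave the parameter unnamed, so there is no "unreferenced" name to warn about.
extern "C" __declspec(dllexport) void __stdcall Unsupported1(void* /* ptr */)
{
}

// Option 2: keep the name for documentation and discard it explicitly.
extern "C" __declspec(dllexport) void __stdcall Unsupported2(void* ptr)
{
    (void)ptr; // or mark the parameter [[maybe_unused]] in C++17 and later
}
```

Either pattern avoids having to add a blanket `/wd4100` suppression for files that only need it in one place.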
dotnet/runtime
66,474
Update repo CMake configuration to target /W4 by default
Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
AaronRobinsonMSFT
2022-03-10T20:20:31Z
2022-03-11T04:59:21Z
ba5207b0f4054fe8963fc3e87c2bd32907516961
5ce2b9f860f7a84c3059650bb67817d59d8f4953
Update repo CMake configuration to target /W4 by default. Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
./src/libraries/System.Runtime.InteropServices/src/MatchingRefApiCompatBaseline.txt
Compat issues with assembly System.Runtime.InteropServices: TypesMustExist : Type 'System.Runtime.InteropServices.AssemblyRegistrationFlags' does not exist in the reference but it does exist in the implementation. CannotChangeAttribute : Attribute 'System.Runtime.Versioning.UnsupportedOSPlatformAttribute' on 'System.Runtime.InteropServices.ComWrappers' changed from '[UnsupportedOSPlatformAttribute("android")]' in the implementation to '[UnsupportedOSPlatformAttribute("android")]' in the reference. TypesMustExist : Type 'System.Runtime.InteropServices.ExporterEventKind' does not exist in the reference but it does exist in the implementation. TypesMustExist : Type 'System.Runtime.InteropServices.IDispatchImplAttribute' does not exist in the reference but it does exist in the implementation. TypesMustExist : Type 'System.Runtime.InteropServices.IDispatchImplType' does not exist in the reference but it does exist in the implementation. TypesMustExist : Type 'System.Runtime.InteropServices.RegistrationClassContext' does not exist in the reference but it does exist in the implementation. TypesMustExist : Type 'System.Runtime.InteropServices.RegistrationConnectionType' does not exist in the reference but it does exist in the implementation. TypesMustExist : Type 'System.Runtime.InteropServices.SetWin32ContextInIDispatchAttribute' does not exist in the reference but it does exist in the implementation. MembersMustExist : Member 'public void System.Runtime.InteropServices.UnmanagedFunctionPointerAttribute..ctor()' does not exist in the reference but it does exist in the implementation. Total Issues: 9
Compat issues with assembly System.Runtime.InteropServices: TypesMustExist : Type 'System.Runtime.InteropServices.AssemblyRegistrationFlags' does not exist in the reference but it does exist in the implementation. CannotChangeAttribute : Attribute 'System.Runtime.Versioning.UnsupportedOSPlatformAttribute' on 'System.Runtime.InteropServices.ComWrappers' changed from '[UnsupportedOSPlatformAttribute("android")]' in the implementation to '[UnsupportedOSPlatformAttribute("android")]' in the reference. TypesMustExist : Type 'System.Runtime.InteropServices.ExporterEventKind' does not exist in the reference but it does exist in the implementation. TypesMustExist : Type 'System.Runtime.InteropServices.IDispatchImplAttribute' does not exist in the reference but it does exist in the implementation. TypesMustExist : Type 'System.Runtime.InteropServices.IDispatchImplType' does not exist in the reference but it does exist in the implementation. TypesMustExist : Type 'System.Runtime.InteropServices.RegistrationClassContext' does not exist in the reference but it does exist in the implementation. TypesMustExist : Type 'System.Runtime.InteropServices.RegistrationConnectionType' does not exist in the reference but it does exist in the implementation. TypesMustExist : Type 'System.Runtime.InteropServices.SetWin32ContextInIDispatchAttribute' does not exist in the reference but it does exist in the implementation. MembersMustExist : Member 'public void System.Runtime.InteropServices.UnmanagedFunctionPointerAttribute..ctor()' does not exist in the reference but it does exist in the implementation. Total Issues: 9
-1
dotnet/runtime
66,474
Update repo CMake configuration to target /W4 by default
Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
AaronRobinsonMSFT
2022-03-10T20:20:31Z
2022-03-11T04:59:21Z
ba5207b0f4054fe8963fc3e87c2bd32907516961
5ce2b9f860f7a84c3059650bb67817d59d8f4953
Update repo CMake configuration to target /W4 by default. Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
./src/coreclr/vm/i386/excepx86.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // /* EXCEP.CPP: * */ #include "common.h" #include "frames.h" #include "excep.h" #include "object.h" #include "field.h" #include "dbginterface.h" #include "cgensys.h" #include "comutilnative.h" #include "sigformat.h" #include "siginfo.hpp" #include "gcheaputilities.h" #include "eedbginterfaceimpl.h" //so we can clearexception in COMPlusThrow #include "eventtrace.h" #include "eetoprofinterfacewrapper.inl" #include "eedbginterfaceimpl.inl" #include "dllimportcallback.h" #include "threads.h" #include "eeconfig.h" #include "vars.hpp" #include "generics.h" #include "asmconstants.h" #include "virtualcallstub.h" #ifndef FEATURE_EH_FUNCLETS MethodDesc * GetUserMethodForILStub(Thread * pThread, UINT_PTR uStubSP, MethodDesc * pILStubMD, Frame ** ppFrameOut); #if !defined(DACCESS_COMPILE) #define FORMAT_MESSAGE_BUFFER_LENGTH 1024 BOOL ComPlusFrameSEH(EXCEPTION_REGISTRATION_RECORD*); PEXCEPTION_REGISTRATION_RECORD GetPrevSEHRecord(EXCEPTION_REGISTRATION_RECORD*); extern "C" { // in asmhelpers.asm: VOID STDCALL ResumeAtJitEHHelper(EHContext *pContext); int STDCALL CallJitEHFilterHelper(size_t *pShadowSP, EHContext *pContext); VOID STDCALL CallJitEHFinallyHelper(size_t *pShadowSP, EHContext *pContext); typedef void (*RtlUnwindCallbackType)(void); BOOL CallRtlUnwind(EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame, RtlUnwindCallbackType callback, EXCEPTION_RECORD *pExceptionRecord, void *retval); BOOL CallRtlUnwindSafe(EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame, RtlUnwindCallbackType callback, EXCEPTION_RECORD *pExceptionRecord, void *retval); } static inline BOOL CPFH_ShouldUnwindStack(const EXCEPTION_RECORD * pCER) { LIMITED_METHOD_CONTRACT; _ASSERTE(pCER != NULL); // We can only unwind those exceptions whose context/record we don't need for a // rethrow. This is complus, and stack overflow. For all the others, we // need to keep the context around for a rethrow, which means they can't // be unwound. if (IsComPlusException(pCER) || pCER->ExceptionCode == STATUS_STACK_OVERFLOW) return TRUE; else return FALSE; } static inline BOOL IsComPlusNestedExceptionRecord(EXCEPTION_REGISTRATION_RECORD* pEHR) { LIMITED_METHOD_CONTRACT; if (pEHR->Handler == (PEXCEPTION_ROUTINE)COMPlusNestedExceptionHandler) return TRUE; return FALSE; } EXCEPTION_REGISTRATION_RECORD *TryFindNestedEstablisherFrame(EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame) { LIMITED_METHOD_CONTRACT; while (pEstablisherFrame->Handler != (PEXCEPTION_ROUTINE)COMPlusNestedExceptionHandler) { pEstablisherFrame = pEstablisherFrame->Next; if (pEstablisherFrame == EXCEPTION_CHAIN_END) return 0; } return pEstablisherFrame; } #ifdef _DEBUG // stores last handler we went to in case we didn't get an endcatch and stack is // corrupted we can figure out who did it. 
static MethodDesc *gLastResumedExceptionFunc = NULL; static DWORD gLastResumedExceptionHandler = 0; #endif //--------------------------------------------------------------------- // void RtlUnwindCallback() // call back function after global unwind, rtlunwind calls this function //--------------------------------------------------------------------- static void RtlUnwindCallback() { LIMITED_METHOD_CONTRACT; _ASSERTE(!"Should never get here"); } BOOL FastNExportSEH(EXCEPTION_REGISTRATION_RECORD* pEHR) { LIMITED_METHOD_CONTRACT; if ((LPVOID)pEHR->Handler == (LPVOID)FastNExportExceptHandler) return TRUE; return FALSE; } BOOL ReverseCOMSEH(EXCEPTION_REGISTRATION_RECORD* pEHR) { LIMITED_METHOD_CONTRACT; #ifdef FEATURE_COMINTEROP if ((LPVOID)pEHR->Handler == (LPVOID)COMPlusFrameHandlerRevCom) return TRUE; #endif // FEATURE_COMINTEROP return FALSE; } // // Returns true if the given SEH handler is one of our SEH handlers that is responsible for managing exceptions in // regions of managed code. // BOOL IsUnmanagedToManagedSEHHandler(EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame) { WRAPPER_NO_CONTRACT; // // ComPlusFrameSEH() is for COMPlusFrameHandler & COMPlusNestedExceptionHandler. // FastNExportSEH() is for FastNExportExceptHandler. // return (ComPlusFrameSEH(pEstablisherFrame) || FastNExportSEH(pEstablisherFrame) || ReverseCOMSEH(pEstablisherFrame)); } Frame *GetCurrFrame(EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame) { Frame *pFrame; WRAPPER_NO_CONTRACT; _ASSERTE(IsUnmanagedToManagedSEHHandler(pEstablisherFrame)); pFrame = ((FrameHandlerExRecord *)pEstablisherFrame)->GetCurrFrame(); // Assert that the exception frame is on the thread or that the exception frame is the top frame. _ASSERTE(GetThreadNULLOk() == NULL || GetThread()->GetFrame() == (Frame*)-1 || GetThread()->GetFrame() <= pFrame); return pFrame; } EXCEPTION_REGISTRATION_RECORD* GetNextCOMPlusSEHRecord(EXCEPTION_REGISTRATION_RECORD* pRec) { WRAPPER_NO_CONTRACT; if (pRec == EXCEPTION_CHAIN_END) return EXCEPTION_CHAIN_END; do { _ASSERTE(pRec != 0); pRec = pRec->Next; } while (pRec != EXCEPTION_CHAIN_END && !IsUnmanagedToManagedSEHHandler(pRec)); _ASSERTE(pRec == EXCEPTION_CHAIN_END || IsUnmanagedToManagedSEHHandler(pRec)); return pRec; } /* * GetClrSEHRecordServicingStackPointer * * This function searchs all the Frame SEH records, and finds the one that is * currently signed up to do all exception handling for the given stack pointer * on the given thread. * * Parameters: * pThread - The thread to search on. * pStackPointer - The stack location that we are finding the Frame SEH Record for. * * Returns * A pointer to the SEH record, or EXCEPTION_CHAIN_END if none was found. * */ PEXCEPTION_REGISTRATION_RECORD GetClrSEHRecordServicingStackPointer(Thread *pThread, void *pStackPointer) { ThreadExceptionState* pExState = pThread->GetExceptionState(); // // We can only do this if there is a context in the pExInfo. There are cases (most notably the // EEPolicy::HandleFatalError case) where we don't have that. In these cases we will return // no enclosing handler since we cannot accurately determine the FS:0 entry which services // this stack address. // // The side effect of this is that for these cases, the debugger cannot intercept // the exception // CONTEXT* pContextRecord = pExState->GetContextRecord(); if (pContextRecord == NULL) { return EXCEPTION_CHAIN_END; } void *exceptionSP = dac_cast<PTR_VOID>(GetSP(pContextRecord)); // // Now set the establishing frame. 
What this means in English is that we need to find // the fs:0 entry that handles exceptions for the place on the stack given in stackPointer. // PEXCEPTION_REGISTRATION_RECORD pSEHRecord = GetFirstCOMPlusSEHRecord(pThread); while (pSEHRecord != EXCEPTION_CHAIN_END) { // // Skip any SEHRecord which is not a CLR record or was pushed after the exception // on this thread occurred. // if (IsUnmanagedToManagedSEHHandler(pSEHRecord) && (exceptionSP <= (void *)pSEHRecord)) { Frame *pFrame = GetCurrFrame(pSEHRecord); // // Arcane knowledge here. All Frame records are stored on the stack by the runtime // in ever decreasing address space. So, we merely have to search back until // we find the first frame record with a higher stack value to find the // establishing frame for the given stack address. // if (((void *)pFrame) >= pStackPointer) { break; } } pSEHRecord = GetNextCOMPlusSEHRecord(pSEHRecord); } return pSEHRecord; } #ifdef _DEBUG // We've deteremined during a stack walk that managed code is transitioning to unamanaged (EE) code. Check that the // state of the EH chain is correct. // // For x86, check that we do INSTALL_COMPLUS_EXCEPTION_HANDLER before calling managed code. This check should be // done for all managed code sites, not just transistions. But this will catch most problem cases. void VerifyValidTransitionFromManagedCode(Thread *pThread, CrawlFrame *pCF) { WRAPPER_NO_CONTRACT; _ASSERTE(ExecutionManager::IsManagedCode(GetControlPC(pCF->GetRegisterSet()))); // Cannot get to the TEB of other threads. So ignore them. if (pThread != GetThreadNULLOk()) { return; } // Find the EH record guarding the current region of managed code, based on the CrawlFrame passed in. PEXCEPTION_REGISTRATION_RECORD pEHR = GetCurrentSEHRecord(); while ((pEHR != EXCEPTION_CHAIN_END) && ((ULONG_PTR)pEHR < GetRegdisplaySP(pCF->GetRegisterSet()))) { pEHR = pEHR->Next; } // VerifyValidTransitionFromManagedCode can be called before the CrawlFrame's MethodDesc is initialized. // Fix that if necessary for the consistency check. MethodDesc * pFunction = pCF->GetFunction(); if ((!IsUnmanagedToManagedSEHHandler(pEHR)) && // Will the assert fire? If not, don't waste our time. (pFunction == NULL)) { _ASSERTE(pCF->GetRegisterSet()); PCODE ip = GetControlPC(pCF->GetRegisterSet()); pFunction = ExecutionManager::GetCodeMethodDesc(ip); _ASSERTE(pFunction); } // Great, we've got the EH record that's next up the stack from the current SP (which is in managed code). That // had better be a record for one of our handlers responsible for handling exceptions in managed code. If its // not, then someone made it into managed code without setting up one of our EH handlers, and that's really // bad. CONSISTENCY_CHECK_MSGF(IsUnmanagedToManagedSEHHandler(pEHR), ("Invalid transition into managed code!\n\n" "We're walking this thread's stack and we've reached a managed frame at Esp=0x%p. " "(The method is %s::%s) " "The very next FS:0 record (0x%p) up from this point on the stack should be one of " "our 'unmanaged to managed SEH handlers', but its not... its something else, and " "that's very bad. It indicates that someone managed to call into managed code without " "setting up the proper exception handling.\n\n" "Get a good unmanaged stack trace for this thread. All FS:0 records are on the stack, " "so you can see who installed the last handler. 
Somewhere between that function and " "where the thread is now is where the bad transition occurred.\n\n" "A little extra info: FS:0 = 0x%p, pEHR->Handler = 0x%p\n", GetRegdisplaySP(pCF->GetRegisterSet()), pFunction ->m_pszDebugClassName, pFunction ->m_pszDebugMethodName, pEHR, GetCurrentSEHRecord(), pEHR->Handler)); } #endif //================================================================================ // There are some things that should never be true when handling an // exception. This function checks for them. Will assert or trap // if it finds an error. static inline void CPFH_VerifyThreadIsInValidState(Thread* pThread, DWORD exceptionCode, EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame) { WRAPPER_NO_CONTRACT; if ( exceptionCode == STATUS_BREAKPOINT || exceptionCode == STATUS_SINGLE_STEP) { return; } #ifdef _DEBUG // check for overwriting of stack CheckStackBarrier(pEstablisherFrame); // trigger check for bad fs:0 chain GetCurrentSEHRecord(); #endif if (!g_fEEShutDown) { // An exception on the GC thread, or while holding the thread store lock, will likely lock out the entire process. if (::IsGCThread() || ThreadStore::HoldingThreadStore()) { _ASSERTE(!"Exception during garbage collection or while holding thread store"); EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE); } } } #ifdef FEATURE_HIJACK void CPFH_AdjustContextForThreadSuspensionRace(CONTEXT *pContext, Thread *pThread) { WRAPPER_NO_CONTRACT; PCODE f_IP = GetIP(pContext); if (Thread::IsAddrOfRedirectFunc((PVOID)f_IP)) { // This is a very rare case where we tried to redirect a thread that was // just about to dispatch an exception, and our update of EIP took, but // the thread continued dispatching the exception. // // If this should happen (very rare) then we fix it up here. // _ASSERTE(pThread->GetSavedRedirectContext()); SetIP(pContext, GetIP(pThread->GetSavedRedirectContext())); STRESS_LOG1(LF_EH, LL_INFO100, "CPFH_AdjustContextForThreadSuspensionRace: Case 1 setting IP = %x\n", pContext->Eip); } if (f_IP == GetEEFuncEntryPoint(THROW_CONTROL_FOR_THREAD_FUNCTION)) { // This is a very rare case where we tried to redirect a thread that was // just about to dispatch an exception, and our update of EIP took, but // the thread continued dispatching the exception. // // If this should happen (very rare) then we fix it up here. // SetIP(pContext, GetIP(pThread->m_OSContext)); STRESS_LOG1(LF_EH, LL_INFO100, "CPFH_AdjustContextForThreadSuspensionRace: Case 2 setting IP = %x\n", pContext->Eip); } // We have another even rarer race condition: // - A) On thread A, Debugger puts an int 3 in the code stream at address X // - A) We hit it and the begin an exception. The eip will be X + 1 (int3 is special) // - B) Meanwhile, thread B redirects A's eip to Y. (Although A is really somewhere // in the kernel, it looks like it's still in user code, so it can fall under the // HandledJitCase and can be redirected) // - A) The OS, trying to be nice, expects we have a breakpoint exception at X+1, // but does -1 on the address since it knows int3 will leave the eip +1. // So the context structure it will pass to the Handler is ideally (X+1)-1 = X // // ** Here's the race: Since thread B redirected A, the eip is actually Y (not X+1), // but the kernel still touches it up to Y-1. So there's a window between when we hit a // bp and when the handler gets called that this can happen. 
// This causes an unhandled BP (since the debugger doesn't recognize the bp at Y-1) // // So what to do: If we land at Y-1 (ie, if f_IP+1 is the addr of a Redirected Func), // then restore the EIP back to X. This will skip the redirection. // Fortunately, this only occurs in cases where it's ok // to skip. The debugger will recognize the patch and handle it. if (Thread::IsAddrOfRedirectFunc((PVOID)(f_IP + 1))) { _ASSERTE(pThread->GetSavedRedirectContext()); SetIP(pContext, GetIP(pThread->GetSavedRedirectContext()) - 1); STRESS_LOG1(LF_EH, LL_INFO100, "CPFH_AdjustContextForThreadSuspensionRace: Case 3 setting IP = %x\n", pContext->Eip); } if (f_IP + 1 == GetEEFuncEntryPoint(THROW_CONTROL_FOR_THREAD_FUNCTION)) { SetIP(pContext, GetIP(pThread->m_OSContext) - 1); STRESS_LOG1(LF_EH, LL_INFO100, "CPFH_AdjustContextForThreadSuspensionRace: Case 4 setting IP = %x\n", pContext->Eip); } } #endif // FEATURE_HIJACK uint32_t g_exceptionCount; //****************************************************************************** EXCEPTION_DISPOSITION COMPlusAfterUnwind( EXCEPTION_RECORD *pExceptionRecord, EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame, ThrowCallbackType& tct) { WRAPPER_NO_CONTRACT; // Note: we've completed the unwind pass up to the establisher frame, and we're headed off to finish our // cleanup and end up back in jitted code. Any more FS0 handlers pushed from this point on out will _not_ be // unwound. We go ahead and assert right here that indeed there are no handlers below the establisher frame // before we go any further. _ASSERTE(pEstablisherFrame == GetCurrentSEHRecord()); Thread* pThread = GetThread(); _ASSERTE(tct.pCurrentExceptionRecord == pEstablisherFrame); NestedHandlerExRecord nestedHandlerExRecord; nestedHandlerExRecord.Init((PEXCEPTION_ROUTINE)COMPlusNestedExceptionHandler, GetCurrFrame(pEstablisherFrame)); // ... and now, put the nested record back on. INSTALL_EXCEPTION_HANDLING_RECORD(&(nestedHandlerExRecord.m_ExReg)); // We entered COMPlusAfterUnwind in PREEMP, but we need to be in COOP from here on out GCX_COOP_NO_DTOR(); tct.bIsUnwind = TRUE; tct.pProfilerNotify = NULL; LOG((LF_EH, LL_INFO100, "COMPlusFrameHandler: unwinding\n")); tct.bUnwindStack = CPFH_ShouldUnwindStack(pExceptionRecord); LOG((LF_EH, LL_INFO1000, "COMPlusAfterUnwind: going to: pFunc:%#X, pStack:%#X\n", tct.pFunc, tct.pStack)); UnwindFrames(pThread, &tct); #ifdef DEBUGGING_SUPPORTED ExInfo* pExInfo = pThread->GetExceptionState()->GetCurrentExceptionTracker(); if (pExInfo->m_ValidInterceptionContext) { // By now we should have all unknown FS:[0] handlers unwinded along with the managed Frames until // the interception point. We can now pop nested exception handlers and resume at interception context. EHContext context = pExInfo->m_InterceptionContext; pExInfo->m_InterceptionContext.Init(); pExInfo->m_ValidInterceptionContext = FALSE; UnwindExceptionTrackerAndResumeInInterceptionFrame(pExInfo, &context); } #endif // DEBUGGING_SUPPORTED _ASSERTE(!"Should not get here"); return ExceptionContinueSearch; } // EXCEPTION_DISPOSITION COMPlusAfterUnwind() #ifdef DEBUGGING_SUPPORTED //--------------------------------------------------------------------------------------- // // This function is called to intercept an exception and start an unwind. 
// // Arguments: // pCurrentEstablisherFrame - the exception registration record covering the stack range // containing the interception point // pExceptionRecord - EXCEPTION_RECORD of the exception being intercepted // // Return Value: // ExceptionContinueSearch if the exception cannot be intercepted // // Notes: // If the exception is intercepted, this function never returns. // EXCEPTION_DISPOSITION ClrDebuggerDoUnwindAndIntercept(EXCEPTION_REGISTRATION_RECORD *pCurrentEstablisherFrame, EXCEPTION_RECORD *pExceptionRecord) { WRAPPER_NO_CONTRACT; if (!CheckThreadExceptionStateForInterception()) { return ExceptionContinueSearch; } Thread* pThread = GetThread(); ThreadExceptionState* pExState = pThread->GetExceptionState(); EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame; ThrowCallbackType tct; tct.Init(); pExState->GetDebuggerState()->GetDebuggerInterceptInfo(&pEstablisherFrame, &(tct.pFunc), &(tct.dHandler), &(tct.pStack), NULL, &(tct.pBottomFrame) ); // // If the handler that we've selected as the handler for the target frame of the unwind is in fact above the // handler that we're currently executing in, then use the current handler instead. Why? Our handlers for // nested exceptions actually process managed frames that live above them, up to the COMPlusFrameHanlder that // pushed the nested handler. If the user selectes a frame above the nested handler, then we will have selected // the COMPlusFrameHandler above the current nested handler. But we don't want to ask RtlUnwind to unwind past // the nested handler that we're currently executing in. // if (pEstablisherFrame > pCurrentEstablisherFrame) { // This should only happen if we're in a COMPlusNestedExceptionHandler. _ASSERTE(IsComPlusNestedExceptionRecord(pCurrentEstablisherFrame)); pEstablisherFrame = pCurrentEstablisherFrame; } #ifdef _DEBUG tct.pCurrentExceptionRecord = pEstablisherFrame; #endif LOG((LF_EH|LF_CORDB, LL_INFO100, "ClrDebuggerDoUnwindAndIntercept: Intercepting at %s\n", tct.pFunc->m_pszDebugMethodName)); LOG((LF_EH|LF_CORDB, LL_INFO100, "\t\t: pFunc is 0x%X\n", tct.pFunc)); LOG((LF_EH|LF_CORDB, LL_INFO100, "\t\t: pStack is 0x%X\n", tct.pStack)); CallRtlUnwindSafe(pEstablisherFrame, RtlUnwindCallback, pExceptionRecord, 0); ExInfo* pExInfo = pThread->GetExceptionState()->GetCurrentExceptionTracker(); if (pExInfo->m_ValidInterceptionContext) { // By now we should have all unknown FS:[0] handlers unwinded along with the managed Frames until // the interception point. We can now pop nested exception handlers and resume at interception context. GCX_COOP(); EHContext context = pExInfo->m_InterceptionContext; pExInfo->m_InterceptionContext.Init(); pExInfo->m_ValidInterceptionContext = FALSE; UnwindExceptionTrackerAndResumeInInterceptionFrame(pExInfo, &context); } // on x86 at least, RtlUnwind always returns // Note: we've completed the unwind pass up to the establisher frame, and we're headed off to finish our // cleanup and end up back in jitted code. Any more FS0 handlers pushed from this point on out will _not_ be // unwound. return COMPlusAfterUnwind(pExState->GetExceptionRecord(), pEstablisherFrame, tct); } // EXCEPTION_DISPOSITION ClrDebuggerDoUnwindAndIntercept() #endif // DEBUGGING_SUPPORTED // This is a wrapper around the assembly routine that invokes RtlUnwind in the OS. // When we invoke RtlUnwind, the OS will modify the ExceptionFlags field in the // exception record to reflect unwind. 
Since we call RtlUnwind in the first pass // with a valid exception record when we find an exception handler AND because RtlUnwind // returns on x86, the OS would have flagged the exception record for unwind. // // Incase the exception is rethrown from the catch/filter-handler AND it's a non-COMPLUS // exception, the runtime will use the reference to the saved exception record to reraise // the exception, as part of rethrow fixup. Since the OS would have modified the exception record // to reflect unwind, this wrapper will "reset" the ExceptionFlags field when RtlUnwind returns. // Otherwise, the rethrow will result in second pass, as opposed to first, since the ExceptionFlags // would indicate an unwind. // // This rethrow issue does not affect COMPLUS exceptions since we always create a brand new exception // record for them in RaiseTheExceptionInternalOnly. BOOL CallRtlUnwindSafe(EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame, RtlUnwindCallbackType callback, EXCEPTION_RECORD *pExceptionRecord, void *retval) { LIMITED_METHOD_CONTRACT; // Save the ExceptionFlags value before invoking RtlUnwind. DWORD dwExceptionFlags = pExceptionRecord->ExceptionFlags; BOOL fRetVal = CallRtlUnwind(pEstablisherFrame, callback, pExceptionRecord, retval); // Reset ExceptionFlags field, if applicable if (pExceptionRecord->ExceptionFlags != dwExceptionFlags) { // We would expect the 32bit OS to have set the unwind flag at this point. _ASSERTE(pExceptionRecord->ExceptionFlags & EXCEPTION_UNWINDING); LOG((LF_EH, LL_INFO100, "CallRtlUnwindSafe: Resetting ExceptionFlags from %lu to %lu\n", pExceptionRecord->ExceptionFlags, dwExceptionFlags)); pExceptionRecord->ExceptionFlags = dwExceptionFlags; } return fRetVal; } //****************************************************************************** // The essence of the first pass handler (after we've decided to actually do // the first pass handling). //****************************************************************************** inline EXCEPTION_DISPOSITION __cdecl CPFH_RealFirstPassHandler( // ExceptionContinueSearch, etc. EXCEPTION_RECORD *pExceptionRecord, // The exception record, with exception type. EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame, // Exception frame on whose behalf this is called. CONTEXT *pContext, // Context from the exception. void *pDispatcherContext, // @todo BOOL bAsynchronousThreadStop, // @todo BOOL fPGCDisabledOnEntry) // @todo { // We don't want to use a runtime contract here since this codepath is used during // the processing of a hard SO. Contracts use a significant amount of stack // which we can't afford for those cases. STATIC_CONTRACT_THROWS; STATIC_CONTRACT_GC_TRIGGERS; STATIC_CONTRACT_MODE_COOPERATIVE; #ifdef _DEBUG static int breakOnFirstPass = -1; if (breakOnFirstPass == -1) breakOnFirstPass = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_BreakOnFirstPass); if (breakOnFirstPass != 0) { _ASSERTE(!"First pass exception handler"); } #endif EXCEPTION_DISPOSITION retval; DWORD exceptionCode = pExceptionRecord->ExceptionCode; Thread *pThread = GetThread(); #ifdef _DEBUG static int breakOnSO = -1; if (breakOnSO == -1) breakOnSO = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_BreakOnSO); if (breakOnSO != 0 && exceptionCode == STATUS_STACK_OVERFLOW) { DebugBreak(); // ASSERTing will overwrite the guard region } #endif // We always want to be in co-operative mode when we run this function and whenever we return // from it, want to go to pre-emptive mode because are returning to OS. 
_ASSERTE(pThread->PreemptiveGCDisabled()); BOOL bPopNestedHandlerExRecord = FALSE; LFH found = LFH_NOT_FOUND; // Result of calling LookForHandler. BOOL bRethrownException = FALSE; BOOL bNestedException = FALSE; #if defined(USE_FEF) BOOL bPopFaultingExceptionFrame = FALSE; FrameWithCookie<FaultingExceptionFrame> faultingExceptionFrame; #endif // USE_FEF ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo); ThrowCallbackType tct; tct.Init(); tct.pTopFrame = GetCurrFrame(pEstablisherFrame); // highest frame to search to #ifdef _DEBUG tct.pCurrentExceptionRecord = pEstablisherFrame; tct.pPrevExceptionRecord = GetPrevSEHRecord(pEstablisherFrame); #endif // _DEBUG BOOL fIsManagedCode = pContext ? ExecutionManager::IsManagedCode(GetIP(pContext)) : FALSE; // this establishes a marker so can determine if are processing a nested exception // don't want to use the current frame to limit search as it could have been unwound by // the time get to nested handler (ie if find an exception, unwind to the call point and // then resume in the catch and then get another exception) so make the nested handler // have the same boundary as this one. If nested handler can't find a handler, we won't // end up searching this frame list twice because the nested handler will set the search // boundary in the thread and so if get back to this handler it will have a range that starts // and ends at the same place. NestedHandlerExRecord nestedHandlerExRecord; nestedHandlerExRecord.Init((PEXCEPTION_ROUTINE)COMPlusNestedExceptionHandler, GetCurrFrame(pEstablisherFrame)); INSTALL_EXCEPTION_HANDLING_RECORD(&(nestedHandlerExRecord.m_ExReg)); bPopNestedHandlerExRecord = TRUE; #if defined(USE_FEF) // Note: don't attempt to push a FEF for an exception in managed code if we weren't in cooperative mode when // the exception was received. If preemptive GC was enabled when we received the exception, then it means the // exception was rethrown from unmangaed code (including EE impl), and we shouldn't push a FEF. if (fIsManagedCode && fPGCDisabledOnEntry && (pThread->m_pFrame == FRAME_TOP || pThread->m_pFrame->GetVTablePtr() != FaultingExceptionFrame::GetMethodFrameVPtr() || (size_t)pThread->m_pFrame > (size_t)pEstablisherFrame)) { // setup interrupted frame so that GC during calls to init won't collect the frames // only need it for non COM+ exceptions in managed code when haven't already // got one on the stack (will have one already if we have called rtlunwind because // the instantiation that called unwind would have installed one) faultingExceptionFrame.InitAndLink(pContext); bPopFaultingExceptionFrame = TRUE; } #endif // USE_FEF OBJECTREF e; e = pThread->LastThrownObject(); STRESS_LOG7(LF_EH, LL_INFO10, "CPFH_RealFirstPassHandler: code:%X, LastThrownObject:%p, MT:%pT" ", IP:%p, SP:%p, pContext:%p, pEstablisherFrame:%p\n", exceptionCode, OBJECTREFToObject(e), (e!=0)?e->GetMethodTable():0, pContext ? GetIP(pContext) : 0, pContext ? GetSP(pContext) : 0, pContext, pEstablisherFrame); #ifdef LOGGING // If it is a complus exception, and there is a thrown object, get its name, for better logging. if (IsComPlusException(pExceptionRecord)) { const char * eClsName = "!EXCEPTION_COMPLUS"; if (e != 0) { eClsName = e->GetMethodTable()->GetDebugClassName(); } LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: exception: 0x%08X, class: '%s', IP: 0x%p\n", exceptionCode, eClsName, pContext ? 
GetIP(pContext) : NULL)); } #endif EXCEPTION_POINTERS exceptionPointers = {pExceptionRecord, pContext}; STRESS_LOG4(LF_EH, LL_INFO10000, "CPFH_RealFirstPassHandler: setting boundaries: Exinfo: 0x%p, BottomMostHandler:0x%p, SearchBoundary:0x%p, TopFrame:0x%p\n", pExInfo, pExInfo->m_pBottomMostHandler, pExInfo->m_pSearchBoundary, tct.pTopFrame); // Here we are trying to decide if we are coming in as: // 1) first handler in a brand new exception // 2) a subsequent handler in an exception // 3) a nested exception // m_pBottomMostHandler is the registration structure (establisher frame) for the most recent (ie lowest in // memory) non-nested handler that was installed and pEstablisher frame is what the current handler // was registered with. // The OS calls each registered handler in the chain, passing its establisher frame to it. if (pExInfo->m_pBottomMostHandler != NULL && pEstablisherFrame > pExInfo->m_pBottomMostHandler) { STRESS_LOG3(LF_EH, LL_INFO10000, "CPFH_RealFirstPassHandler: detected subsequent handler. ExInfo:0x%p, BottomMost:0x%p SearchBoundary:0x%p\n", pExInfo, pExInfo->m_pBottomMostHandler, pExInfo->m_pSearchBoundary); // If the establisher frame of this handler is greater than the bottommost then it must have been // installed earlier and therefore we are case 2 if (pThread->GetThrowable() == NULL) { // Bottommost didn't setup a throwable, so not exception not for us retval = ExceptionContinueSearch; goto exit; } // setup search start point tct.pBottomFrame = pExInfo->m_pSearchBoundary; if (tct.pTopFrame == tct.pBottomFrame) { // this will happen if our nested handler already searched for us so we don't want // to search again retval = ExceptionContinueSearch; goto exit; } } else { // we are either case 1 or case 3 #if defined(_DEBUG_IMPL) //@todo: merge frames, context, handlers if (pThread->GetFrame() != FRAME_TOP) pThread->GetFrame()->LogFrameChain(LF_EH, LL_INFO1000); #endif // _DEBUG_IMPL // If the exception was rethrown, we'll create a new ExInfo, which will represent the rethrown exception. // The original exception is not the rethrown one. if (pExInfo->m_ExceptionFlags.IsRethrown() && pThread->LastThrownObject() != NULL) { pExInfo->m_ExceptionFlags.ResetIsRethrown(); bRethrownException = TRUE; #if defined(USE_FEF) if (bPopFaultingExceptionFrame) { // if we added a FEF, it will refer to the frame at the point of the original exception which is // already unwound so don't want it. // If we rethrew the exception we have already added a helper frame for the rethrow, so don't // need this one. If we didn't rethrow it, (ie rethrow from native) then there the topmost frame will // be a transition to native frame in which case we don't need it either faultingExceptionFrame.Pop(); bPopFaultingExceptionFrame = FALSE; } #endif } // If the establisher frame is less than the bottommost handler, then this is nested because the // establisher frame was installed after the bottommost. if (pEstablisherFrame < pExInfo->m_pBottomMostHandler /* || IsComPlusNestedExceptionRecord(pEstablisherFrame) */ ) { bNestedException = TRUE; // case 3: this is a nested exception. Need to save and restore the thread info STRESS_LOG3(LF_EH, LL_INFO10000, "CPFH_RealFirstPassHandler: ExInfo:0x%p detected nested exception 0x%p < 0x%p\n", pExInfo, pEstablisherFrame, pExInfo->m_pBottomMostHandler); EXCEPTION_REGISTRATION_RECORD* pNestedER = TryFindNestedEstablisherFrame(pEstablisherFrame); ExInfo *pNestedExInfo; if (!pNestedER || pNestedER >= pExInfo->m_pBottomMostHandler ) { // RARE CASE. 
We've re-entered the EE from an unmanaged filter. // // OR // // We can be here if we dont find a nested exception handler. This is exemplified using // call chain of scenario 2 explained further below. // // Assuming __try of NativeB throws an exception E1 and it gets caught in ManagedA2, then // bottom-most handler (BMH) is going to be CPFH_A. The catch will trigger an unwind // and invoke __finally in NativeB. Let the __finally throw a new exception E2. // // Assuming ManagedB2 has a catch block to catch E2, when we enter CPFH_B looking for a // handler for E2, our establisher frame will be that of CPFH_B, which will be lower // in stack than current BMH (which is CPFH_A). Thus, we will come here, determining // E2 to be nested exception correctly but not find a nested exception handler. void *limit = (void *) GetPrevSEHRecord(pExInfo->m_pBottomMostHandler); pNestedExInfo = new (nothrow) ExInfo(); // Very rare failure here; need robust allocator. if (pNestedExInfo == NULL) { // if we can't allocate memory, we can't correctly continue. #if defined(_DEBUG) if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_NestedEhOom)) _ASSERTE(!"OOM in callback from unmanaged filter."); #endif // _DEBUG EEPOLICY_HANDLE_FATAL_ERROR(COR_E_OUTOFMEMORY); } pNestedExInfo->m_StackAddress = limit; // Note: this is also the flag that tells us this // ExInfo was stack allocated. } else { pNestedExInfo = &((NestedHandlerExRecord*)pNestedER)->m_handlerInfo; } LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: PushExInfo() current: 0x%p previous: 0x%p\n", pExInfo->m_StackAddress, pNestedExInfo->m_StackAddress)); _ASSERTE(pNestedExInfo); pNestedExInfo->m_hThrowable = NULL; // pNestedExInfo may be stack allocated, and as such full of // garbage. m_hThrowable must be sane, so set it to NULL. (We could // zero the entire record, but this is cheaper.) pNestedExInfo->CopyAndClearSource(pExInfo); pExInfo->m_pPrevNestedInfo = pNestedExInfo; // Save at head of nested info chain #if 0 /* the following code was introduced in Whidbey as part of the Faulting Exception Frame removal (12/03). However it isn't correct. If any nested exceptions occur while processing a rethrow, we would incorrectly consider the nested exception to be a rethrow. See VSWhidbey 349379 for an example. Therefore I am disabling this code until we see a failure that explains why it was added in the first place. cwb 9/04. */ // If we're here as a result of a rethrown exception, set the rethrown flag on the new ExInfo. if (bRethrownException) { pExInfo->m_ExceptionFlags.SetIsRethrown(); } #endif } else { // At this point, either: // // 1) the bottom-most handler is NULL, implying this is a new exception for which we are getting ready, OR // 2) the bottom-most handler is not-NULL, implying that a there is already an existing exception in progress. // // Scenario 1 is that of a new throw and is easy to understand. Scenario 2 is the interesting one. // // ManagedA1 -> ManagedA2 -> ManagedA3 -> NativeCodeA -> ManagedB1 -> ManagedB2 -> ManagedB3 -> NativeCodeB // // On x86, each block of managed code is protected by one COMPlusFrameHandler [CPFH] (CLR's exception handler // for managed code), unlike 64bit where each frame has a personality routine attached to it. Thus, // for the example above, assume CPFH_A protects ManagedA* blocks and is setup just before the call to // ManagedA1. Likewise, CPFH_B protects ManagedB* blocks and is setup just before the call to ManagedB1. 
// // When ManagedB3 throws an exception, CPFH_B is invoked to look for a handler in all of the ManagedB* blocks. // At this point, it is setup as the "bottom-most-handler" (BMH). If no handler is found and exception reaches // ManagedA* blocks, CPFH_A is invoked to look for a handler and thus, becomes BMH. // // Thus, in the first pass on x86 for a given exception, a particular CPFH will be invoked only once when looking // for a handler and thus, registered as BMH only once. Either the exception goes unhandled and the process will // terminate or a handler will be found and second pass will commence. // // However, assume NativeCodeB had a __try/__finally and raised an exception [E1] within the __try. Let's assume // it gets caught in ManagedB1 and thus, unwind is triggered. At this point, the active exception tracker // has context about the exception thrown out of __try and CPFH_B is registered as BMH. // // If the __finally throws a new exception [E2], CPFH_B will be invoked again for first pass while looking for // a handler for the thrown exception. Since BMH is already non-NULL, we will come here since EstablisherFrame will be // the same as BMH (because EstablisherFrame will be that of CPFH_B). We will proceed to overwrite the "required" parts // of the existing exception tracker with the details of E2 (see setting of exception record and context below), erasing // any artifact of E1. // // This is unlike Scenario 1 when exception tracker is completely initialized to default values. This is also // unlike 64bit which will detect that E1 and E2 are different exceptions and hence, will setup a new tracker // to track E2, effectively behaving like Scenario 1 above. X86 cannot do this since there is no nested exception // tracker setup that gets to see the new exception. // // Thus, if E1 was a CSE and E2 isn't, we will come here and treat E2 as a CSE as well since corruption severity // is initialized as part of exception tracker initialization. Thus, E2 will start to be treated as CSE, which is // incorrect. Similar argument applies to delivery of First chance exception notification delivery. // // <QUIP> Another example why we should unify EH systems :) </QUIP> // // To address this issue, we will need to reset exception tracker here, just like the overwriting of "required" // parts of exception tracker. // If the current establisher frame is the same as the bottom-most-handler and we are here // in the first pass, assert that current exception and the one tracked by active exception tracker // are indeed different exceptions. In such a case, we must reset the exception tracker so that it can be // setup correctly further down when CEHelper::SetupCorruptionSeverityForActiveException is invoked. if ((pExInfo->m_pBottomMostHandler != NULL) && (pEstablisherFrame == pExInfo->m_pBottomMostHandler)) { // Current exception should be different from the one exception tracker is already tracking. _ASSERTE(pExceptionRecord != pExInfo->m_pExceptionRecord); // This cannot be nested exceptions - they are handled earlier (see above). 
_ASSERTE(!bNestedException); LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: Bottom-most handler (0x%p) is the same as EstablisherFrame.\n", pExInfo->m_pBottomMostHandler)); LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: Exception record in exception tracker is 0x%p, while that of new exception is 0x%p.\n", pExInfo->m_pExceptionRecord, pExceptionRecord)); LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: Resetting exception tracker (0x%p).\n", pExInfo)); // This will reset the exception tracker state, including the corruption severity. pExInfo->Init(); } } // If we are handling a fault from managed code, we need to set the Thread->ExInfo->pContext to // the current fault context, which is used in the stack walk to get back into the managed // stack with the correct registers. (Previously, this was done by linking in a FaultingExceptionFrame // record.) // We are about to create the managed exception object, which may trigger a GC, so set this up now. pExInfo->m_pExceptionRecord = pExceptionRecord; pExInfo->m_pContext = pContext; if (pContext && ShouldHandleManagedFault(pExceptionRecord, pContext, pEstablisherFrame, pThread)) { // If this was a fault in managed code, rather than create a Frame for stackwalking, // we can use this exinfo (after all, it has all the register info.) pExInfo->m_ExceptionFlags.SetUseExInfoForStackwalk(); } // It should now be safe for a GC to happen. // case 1 & 3: this is the first time through of a new, nested, or rethrown exception, so see if we can // find a handler. Only setup throwable if are bottommost handler if (IsComPlusException(pExceptionRecord) && (!bAsynchronousThreadStop)) { // Update the throwable from the last thrown object. Note: this may cause OOM, in which case we replace // both throwables with the preallocated OOM exception. pThread->SafeSetThrowables(pThread->LastThrownObject()); // now we've got a COM+ exception, fall through to so see if we handle it STRESS_LOG3(LF_EH, LL_INFO10000, "CPFH_RealFirstPassHandler: fall through ExInfo:0x%p setting m_pBottomMostHandler to 0x%p from 0x%p\n", pExInfo, pEstablisherFrame, pExInfo->m_pBottomMostHandler); pExInfo->m_pBottomMostHandler = pEstablisherFrame; } else if (bRethrownException) { // If it was rethrown and not COM+, will still be the last one thrown. Either we threw it last and // stashed it here or someone else caught it and rethrew it, in which case it will still have been // originally stashed here. // Update the throwable from the last thrown object. Note: this may cause OOM, in which case we replace // both throwables with the preallocated OOM exception. pThread->SafeSetThrowables(pThread->LastThrownObject()); STRESS_LOG3(LF_EH, LL_INFO10000, "CPFH_RealFirstPassHandler: rethrow non-COM+ ExInfo:0x%p setting m_pBottomMostHandler to 0x%p from 0x%p\n", pExInfo, pEstablisherFrame, pExInfo->m_pBottomMostHandler); pExInfo->m_pBottomMostHandler = pEstablisherFrame; } else { if (!fIsManagedCode) { tct.bDontCatch = false; } if (exceptionCode == STATUS_BREAKPOINT) { // don't catch int 3 retval = ExceptionContinueSearch; goto exit; } // We need to set m_pBottomMostHandler here, Thread::IsExceptionInProgress returns 1. // This is a necessary part of suppressing thread abort exceptions in the constructor // of any exception object we might create. 
STRESS_LOG3(LF_EH, LL_INFO10000, "CPFH_RealFirstPassHandler: setting ExInfo:0x%p m_pBottomMostHandler for IsExceptionInProgress to 0x%p from 0x%p\n", pExInfo, pEstablisherFrame, pExInfo->m_pBottomMostHandler); pExInfo->m_pBottomMostHandler = pEstablisherFrame; // Create the managed exception object. OBJECTREF throwable = CreateCOMPlusExceptionObject(pThread, pExceptionRecord, bAsynchronousThreadStop); // Set the throwables on the thread to the newly created object. If this fails, it will return a // preallocated exception object instead. This also updates the last thrown exception, for rethrows. throwable = pThread->SafeSetThrowables(throwable); // Set the exception code and pointers. We set these after setting the throwables on the thread, // because if the proper exception is replaced by an OOM exception, we still want the exception code // and pointers set in the OOM exception. EXCEPTIONREF exceptionRef = (EXCEPTIONREF)throwable; exceptionRef->SetXCode(pExceptionRecord->ExceptionCode); exceptionRef->SetXPtrs(&exceptionPointers); } tct.pBottomFrame = NULL; EEToProfilerExceptionInterfaceWrapper::ExceptionThrown(pThread); g_exceptionCount++; } // End of case-1-or-3 { // Allocate storage for the stack trace. OBJECTREF throwable = NULL; GCPROTECT_BEGIN(throwable); throwable = pThread->GetThrowable(); if (IsProcessCorruptedStateException(exceptionCode, throwable)) { // Failfast if exception indicates corrupted process state EEPOLICY_HANDLE_FATAL_ERROR(exceptionCode); } // If we're out of memory, then we figure there's probably not memory to maintain a stack trace, so we skip it. // If we've got a stack overflow, then we figure the stack will be so huge as to make tracking the stack trace // impracticle, so we skip it. if ((throwable == CLRException::GetPreallocatedOutOfMemoryException()) || (throwable == CLRException::GetPreallocatedStackOverflowException())) { tct.bAllowAllocMem = FALSE; } else { pExInfo->m_StackTraceInfo.AllocateStackTrace(); } GCPROTECT_END(); } // Set up information for GetExceptionPointers()/GetExceptionCode() callback. pExInfo->SetExceptionCode(pExceptionRecord); pExInfo->m_pExceptionPointers = &exceptionPointers; if (bRethrownException || bNestedException) { _ASSERTE(pExInfo->m_pPrevNestedInfo != NULL); SetStateForWatsonBucketing(bRethrownException, pExInfo->GetPreviousExceptionTracker()->GetThrowableAsHandle()); } #ifdef DEBUGGING_SUPPORTED // // At this point the exception is still fresh to us, so assert that // there should be nothing from the debugger on it. // _ASSERTE(!pExInfo->m_ExceptionFlags.DebuggerInterceptInfo()); #endif if (pThread->IsRudeAbort()) { OBJECTREF throwable = pThread->GetThrowable(); if (throwable == NULL || !IsExceptionOfType(kThreadAbortException, &throwable)) { // Neither of these sets will throw because the throwable that we're setting is a preallocated // exception. This also updates the last thrown exception, for rethrows. pThread->SafeSetThrowables(CLRException::GetBestThreadAbortException()); } if (!pThread->IsRudeAbortInitiated()) { pThread->PreWorkForThreadAbort(); } } LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: looking for handler bottom %x, top %x\n", tct.pBottomFrame, tct.pTopFrame)); tct.bReplaceStack = pExInfo->m_pBottomMostHandler == pEstablisherFrame && !bRethrownException; tct.bSkipLastElement = bRethrownException && bNestedException; found = LookForHandler(&exceptionPointers, pThread, &tct); // We have searched this far. 
pExInfo->m_pSearchBoundary = tct.pTopFrame; LOG((LF_EH, LL_INFO1000, "CPFH_RealFirstPassHandler: set pSearchBoundary to 0x%p\n", pExInfo->m_pSearchBoundary)); if ((found == LFH_NOT_FOUND) #ifdef DEBUGGING_SUPPORTED && !pExInfo->m_ExceptionFlags.DebuggerInterceptInfo() #endif ) { LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: NOT_FOUND\n")); if (tct.pTopFrame == FRAME_TOP) { LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: NOT_FOUND at FRAME_TOP\n")); } retval = ExceptionContinueSearch; goto exit; } else { // so we are going to handle the exception // Remove the nested exception record -- before calling RtlUnwind. // The second-pass callback for a NestedExceptionRecord assumes that if it's // being unwound, it should pop one exception from the pExInfo chain. This is // true for any older NestedRecords that might be unwound -- but not for the // new one we're about to add. To avoid this, we remove the new record // before calling Unwind. // // <TODO>@NICE: This can probably be a little cleaner -- the nested record currently // is also used to guard the running of the filter code. When we clean up the // behaviour of exceptions within filters, we should be able to get rid of this // PUSH/POP/PUSH behaviour.</TODO> _ASSERTE(bPopNestedHandlerExRecord); UNINSTALL_EXCEPTION_HANDLING_RECORD(&(nestedHandlerExRecord.m_ExReg)); // Since we are going to handle the exception we switch into preemptive mode GCX_PREEMP_NO_DTOR(); #ifdef DEBUGGING_SUPPORTED // // Check if the debugger wants to intercept this frame at a different point than where we are. // if (pExInfo->m_ExceptionFlags.DebuggerInterceptInfo()) { ClrDebuggerDoUnwindAndIntercept(pEstablisherFrame, pExceptionRecord); // // If this returns, then the debugger couldn't do it's stuff and we default to the found handler. // if (found == LFH_NOT_FOUND) { retval = ExceptionContinueSearch; // we need to be sure to switch back into Cooperative mode since we are going to // jump to the exit: label and follow the normal return path (it is expected that // CPFH_RealFirstPassHandler returns in COOP. GCX_PREEMP_NO_DTOR_END(); goto exit; } } #endif LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: handler found: %s\n", tct.pFunc->m_pszDebugMethodName)); CallRtlUnwindSafe(pEstablisherFrame, RtlUnwindCallback, pExceptionRecord, 0); // on x86 at least, RtlUnwind always returns // The CallRtlUnwindSafe could have popped the explicit frame that the tct.pBottomFrame points to (UMThunkPrestubHandler // does that). In such case, the tct.pBottomFrame needs to be updated to point to the first valid explicit frame. Frame* frame = pThread->GetFrame(); if ((tct.pBottomFrame != NULL) && (frame > tct.pBottomFrame)) { tct.pBottomFrame = frame; } // Note: we've completed the unwind pass up to the establisher frame, and we're headed off to finish our // cleanup and end up back in jitted code. Any more FS0 handlers pushed from this point on out will _not_ be // unwound. // Note: we are still in Preemptive mode here and that is correct, COMPlusAfterUnwind will switch us back // into Cooperative mode. return COMPlusAfterUnwind(pExceptionRecord, pEstablisherFrame, tct); } exit: { // We need to be in COOP if we get here GCX_ASSERT_COOP(); } // If we got as far as saving pExInfo, save the context pointer so it's available for the unwind. if (pExInfo) { pExInfo->m_pContext = pContext; // pExInfo->m_pExceptionPointers points to a local structure, which is now going out of scope. 
pExInfo->m_pExceptionPointers = NULL; } #if defined(USE_FEF) if (bPopFaultingExceptionFrame) { faultingExceptionFrame.Pop(); } #endif // USE_FEF if (bPopNestedHandlerExRecord) { UNINSTALL_EXCEPTION_HANDLING_RECORD(&(nestedHandlerExRecord.m_ExReg)); } return retval; } // CPFH_RealFirstPassHandler() //****************************************************************************** // void InitializeExceptionHandling() { WRAPPER_NO_CONTRACT; InitSavedExceptionInfo(); CLRAddVectoredHandlers(); // Initialize the lock used for synchronizing access to the stacktrace in the exception object g_StackTraceArrayLock.Init(LOCK_TYPE_DEFAULT, TRUE); } //****************************************************************************** static inline EXCEPTION_DISPOSITION __cdecl CPFH_FirstPassHandler(EXCEPTION_RECORD *pExceptionRecord, EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame, CONTEXT *pContext, DISPATCHER_CONTEXT *pDispatcherContext) { WRAPPER_NO_CONTRACT; EXCEPTION_DISPOSITION retval; _ASSERTE (!(pExceptionRecord->ExceptionFlags & (EXCEPTION_UNWINDING | EXCEPTION_EXIT_UNWIND))); DWORD exceptionCode = pExceptionRecord->ExceptionCode; Thread *pThread = GetThread(); STRESS_LOG4(LF_EH, LL_INFO100, "CPFH_FirstPassHandler: pEstablisherFrame = %x EH code = %x EIP = %x with ESP = %x\n", pEstablisherFrame, exceptionCode, pContext ? GetIP(pContext) : 0, pContext ? GetSP(pContext) : 0); EXCEPTION_POINTERS ptrs = { pExceptionRecord, pContext }; // Call to the vectored handler to give other parts of the Runtime a chance to jump in and take over an // exception before we do too much with it. The most important point in the vectored handler is not to toggle // the GC mode. DWORD filter = CLRVectoredExceptionHandler(&ptrs); if (filter == (DWORD) EXCEPTION_CONTINUE_EXECUTION) { return ExceptionContinueExecution; } else if (filter == EXCEPTION_CONTINUE_SEARCH) { return ExceptionContinueSearch; } #if defined(STRESS_HEAP) // // Check to see if this exception is due to GCStress. Since the GCStress mechanism only injects these faults // into managed code, we only need to check for them in CPFH_FirstPassHandler. // if (IsGcMarker(pContext, pExceptionRecord)) { return ExceptionContinueExecution; } #endif // STRESS_HEAP // We always want to be in co-operative mode when we run this function and whenever we return // from it, want to go to pre-emptive mode because are returning to OS. BOOL disabled = pThread->PreemptiveGCDisabled(); GCX_COOP_NO_DTOR(); BOOL bAsynchronousThreadStop = IsThreadHijackedForThreadStop(pThread, pExceptionRecord); if (bAsynchronousThreadStop) { // If we ever get here in preemptive mode, we're in trouble. We've // changed the thread's IP to point at a little function that throws ... if // the thread were to be in preemptive mode and a GC occurred, the stack // crawl would have been all messed up (becuase we have no frame that points // us back to the right place in managed code). _ASSERTE(disabled); AdjustContextForThreadStop(pThread, pContext); LOG((LF_EH, LL_INFO100, "CPFH_FirstPassHandler is Asynchronous Thread Stop or Abort\n")); } pThread->ResetThrowControlForThread(); CPFH_VerifyThreadIsInValidState(pThread, exceptionCode, pEstablisherFrame); // If we were in cooperative mode when we came in here, then its okay to see if we should do HandleManagedFault // and push a FaultingExceptionFrame. If we weren't in coop mode coming in here, then it means that there's no // way the exception could really be from managed code. 
I might look like it was from managed code, but in // reality its a rethrow from unmanaged code, either unmanaged user code, or unmanaged EE implementation. if (disabled && ShouldHandleManagedFault(pExceptionRecord, pContext, pEstablisherFrame, pThread)) { #if defined(USE_FEF) HandleManagedFault(pExceptionRecord, pContext, pEstablisherFrame, pThread); retval = ExceptionContinueExecution; goto exit; #else // USE_FEF // Save the context pointer in the Thread's EXInfo, so that a stack crawl can recover the // register values from the fault. //@todo: I haven't yet found any case where we need to do anything here. If there are none, eliminate // this entire if () {} block. #endif // USE_FEF } // OK. We're finally ready to start the real work. Nobody else grabbed the exception in front of us. Now we can // get started. retval = CPFH_RealFirstPassHandler(pExceptionRecord, pEstablisherFrame, pContext, pDispatcherContext, bAsynchronousThreadStop, disabled); #if defined(USE_FEF) // This label is only used in the HandleManagedFault() case above. exit: #endif if (retval != ExceptionContinueExecution || !disabled) { GCX_PREEMP_NO_DTOR(); } STRESS_LOG1(LF_EH, LL_INFO100, "CPFH_FirstPassHandler: exiting with retval %d\n", retval); return retval; } // CPFH_FirstPassHandler() //****************************************************************************** inline void CPFH_UnwindFrames1(Thread* pThread, EXCEPTION_REGISTRATION_RECORD* pEstablisherFrame, DWORD exceptionCode) { WRAPPER_NO_CONTRACT; ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo); // Ready to unwind the stack... ThrowCallbackType tct; tct.Init(); tct.bIsUnwind = TRUE; tct.pTopFrame = GetCurrFrame(pEstablisherFrame); // highest frame to search to tct.pBottomFrame = NULL; #ifdef _DEBUG tct.pCurrentExceptionRecord = pEstablisherFrame; tct.pPrevExceptionRecord = GetPrevSEHRecord(pEstablisherFrame); #endif #ifdef DEBUGGING_SUPPORTED EXCEPTION_REGISTRATION_RECORD *pInterceptEstablisherFrame = NULL; // If the exception is intercepted, use information stored in the DebuggerExState to unwind the stack. if (pExInfo->m_ExceptionFlags.DebuggerInterceptInfo()) { pExInfo->m_DebuggerExState.GetDebuggerInterceptInfo(&pInterceptEstablisherFrame, NULL, // MethodDesc **ppFunc, NULL, // int *pdHandler, NULL, // BYTE **ppStack NULL, // ULONG_PTR *pNativeOffset, NULL // Frame **ppFrame) ); LOG((LF_EH, LL_INFO1000, "CPFH_UnwindFrames1: frames are Est 0x%X, Intercept 0x%X\n", pEstablisherFrame, pInterceptEstablisherFrame)); // // When we set up for the interception we store off the CPFH or CPNEH that we // *know* will handle unwinding the destination of the intercept. // // However, a CPNEH with the same limiting Capital-F-rame could do the work // and unwind us, so... // // If this is the exact frame handler we are supposed to search for, or // if this frame handler services the same Capital-F-rame as the frame handler // we are looking for (i.e. this frame handler may do the work that we would // expect our frame handler to do), // then // we need to pass the interception destination during this unwind. 
// _ASSERTE(IsUnmanagedToManagedSEHHandler(pEstablisherFrame)); if ((pEstablisherFrame == pInterceptEstablisherFrame) || (GetCurrFrame(pEstablisherFrame) == GetCurrFrame(pInterceptEstablisherFrame))) { pExInfo->m_DebuggerExState.GetDebuggerInterceptInfo(NULL, &(tct.pFunc), &(tct.dHandler), &(tct.pStack), NULL, &(tct.pBottomFrame) ); LOG((LF_EH, LL_INFO1000, "CPFH_UnwindFrames1: going to: pFunc:%#X, pStack:%#X\n", tct.pFunc, tct.pStack)); } } #endif UnwindFrames(pThread, &tct); LOG((LF_EH, LL_INFO1000, "CPFH_UnwindFrames1: after unwind ec:%#x, tct.pTopFrame:0x%p, pSearchBndry:0x%p\n" " pEstFrame:0x%p, IsC+NestExRec:%d, !Nest||Active:%d\n", exceptionCode, tct.pTopFrame, pExInfo->m_pSearchBoundary, pEstablisherFrame, IsComPlusNestedExceptionRecord(pEstablisherFrame), (!IsComPlusNestedExceptionRecord(pEstablisherFrame) || reinterpret_cast<NestedHandlerExRecord*>(pEstablisherFrame)->m_ActiveForUnwind))); if (tct.pTopFrame >= pExInfo->m_pSearchBoundary && (!IsComPlusNestedExceptionRecord(pEstablisherFrame) || reinterpret_cast<NestedHandlerExRecord*>(pEstablisherFrame)->m_ActiveForUnwind) ) { // If this is the search boundary, and we're not a nested handler, then // this is the last time we'll see this exception. Time to unwind our // exinfo. STRESS_LOG0(LF_EH, LL_INFO100, "CPFH_UnwindFrames1: Exception unwind -- unmanaged catcher detected\n"); pExInfo->UnwindExInfo((VOID*)pEstablisherFrame); } } // CPFH_UnwindFrames1() //****************************************************************************** inline EXCEPTION_DISPOSITION __cdecl CPFH_UnwindHandler(EXCEPTION_RECORD *pExceptionRecord, EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame, CONTEXT *pContext, void *pDispatcherContext) { WRAPPER_NO_CONTRACT; _ASSERTE (pExceptionRecord->ExceptionFlags & (EXCEPTION_UNWINDING | EXCEPTION_EXIT_UNWIND)); #ifdef _DEBUG // Note: you might be inclined to write "static int breakOnSecondPass = CLRConfig::GetConfigValue(...);", but // you can't do that here. That causes C++ EH to be generated under the covers for this function, and this // function isn't allowed to have any C++ EH in it because its never going to return. static int breakOnSecondPass; // = 0 static BOOL breakOnSecondPassSetup; // = FALSE if (!breakOnSecondPassSetup) { breakOnSecondPass = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_BreakOnSecondPass); breakOnSecondPassSetup = TRUE; } if (breakOnSecondPass != 0) { _ASSERTE(!"Unwind handler"); } #endif DWORD exceptionCode = pExceptionRecord->ExceptionCode; Thread *pThread = GetThread(); ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo); STRESS_LOG4(LF_EH, LL_INFO100, "In CPFH_UnwindHandler EHCode = %x EIP = %x with ESP = %x, pEstablisherFrame = 0x%p\n", exceptionCode, pContext ? GetIP(pContext) : 0, pContext ? GetSP(pContext) : 0, pEstablisherFrame); // We always want to be in co-operative mode when we run this function. Whenever we return // from it, want to go to pre-emptive mode because are returning to OS. { // needs to be in its own scope to avoid polluting the namespace, since // we don't do a _END then we don't revert the state GCX_COOP_NO_DTOR(); } CPFH_VerifyThreadIsInValidState(pThread, exceptionCode, pEstablisherFrame); if (IsComPlusNestedExceptionRecord(pEstablisherFrame)) { NestedHandlerExRecord *pHandler = reinterpret_cast<NestedHandlerExRecord*>(pEstablisherFrame); if (pHandler->m_pCurrentExInfo != NULL) { // See the comment at the end of COMPlusNestedExceptionHandler about nested exception. // OS is going to skip the EstablisherFrame before our NestedHandler. 
if (pHandler->m_pCurrentExInfo->m_pBottomMostHandler <= pHandler->m_pCurrentHandler) { // We're unwinding -- the bottom most handler is potentially off top-of-stack now. If // it is, change it to the next COM+ frame. (This one is not good, as it's about to // disappear.) EXCEPTION_REGISTRATION_RECORD *pNextBottomMost = GetNextCOMPlusSEHRecord(pHandler->m_pCurrentHandler); STRESS_LOG3(LF_EH, LL_INFO10000, "COMPlusNestedExceptionHandler: setting ExInfo:0x%p m_pBottomMostHandler from 0x%p to 0x%p\n", pHandler->m_pCurrentExInfo, pHandler->m_pCurrentExInfo->m_pBottomMostHandler, pNextBottomMost); pHandler->m_pCurrentExInfo->m_pBottomMostHandler = pNextBottomMost; } } } // this establishes a marker so can determine if are processing a nested exception // don't want to use the current frame to limit search as it could have been unwound by // the time get to nested handler (ie if find an exception, unwind to the call point and // then resume in the catch and then get another exception) so make the nested handler // have the same boundary as this one. If nested handler can't find a handler, we won't // end up searching this frame list twice because the nested handler will set the search // boundary in the thread and so if get back to this handler it will have a range that starts // and ends at the same place. NestedHandlerExRecord nestedHandlerExRecord; nestedHandlerExRecord.Init((PEXCEPTION_ROUTINE)COMPlusNestedExceptionHandler, GetCurrFrame(pEstablisherFrame)); nestedHandlerExRecord.m_ActiveForUnwind = TRUE; nestedHandlerExRecord.m_pCurrentExInfo = pExInfo; nestedHandlerExRecord.m_pCurrentHandler = pEstablisherFrame; INSTALL_EXCEPTION_HANDLING_RECORD(&(nestedHandlerExRecord.m_ExReg)); // Unwind the stack. The establisher frame sets the boundary. CPFH_UnwindFrames1(pThread, pEstablisherFrame, exceptionCode); // We're unwinding -- the bottom most handler is potentially off top-of-stack now. If // it is, change it to the next COM+ frame. (This one is not good, as it's about to // disappear.) if (pExInfo->m_pBottomMostHandler && pExInfo->m_pBottomMostHandler <= pEstablisherFrame) { EXCEPTION_REGISTRATION_RECORD *pNextBottomMost = GetNextCOMPlusSEHRecord(pEstablisherFrame); // If there is no previous COM+ SEH handler, GetNextCOMPlusSEHRecord() will return -1. Much later, we will dereference that and AV. _ASSERTE (pNextBottomMost != EXCEPTION_CHAIN_END); STRESS_LOG3(LF_EH, LL_INFO10000, "CPFH_UnwindHandler: setting ExInfo:0x%p m_pBottomMostHandler from 0x%p to 0x%p\n", pExInfo, pExInfo->m_pBottomMostHandler, pNextBottomMost); pExInfo->m_pBottomMostHandler = pNextBottomMost; } { // needs to be in its own scope to avoid polluting the namespace, since // we don't do a _END then we don't revert the state GCX_PREEMP_NO_DTOR(); } UNINSTALL_EXCEPTION_HANDLING_RECORD(&(nestedHandlerExRecord.m_ExReg)); // If we are here, then exception was not caught in managed code protected by this // ComplusFrameHandler. Hence, reset thread abort state if this is the last personality routine, // for managed code, on the stack. 
ResetThreadAbortState(pThread, pEstablisherFrame); STRESS_LOG0(LF_EH, LL_INFO100, "CPFH_UnwindHandler: Leaving with ExceptionContinueSearch\n"); return ExceptionContinueSearch; } // CPFH_UnwindHandler() //****************************************************************************** // This is the first handler that is called in the context of managed code // It is the first level of defense and tries to find a handler in the user // code to handle the exception //------------------------------------------------------------------------- // EXCEPTION_DISPOSITION __cdecl COMPlusFrameHandler( // EXCEPTION_RECORD *pExceptionRecord, // _EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame, // CONTEXT *pContext, // DISPATCHER_CONTEXT *pDispatcherContext) // // See http://www.microsoft.com/msj/0197/exception/exception.aspx for a background piece on Windows // unmanaged structured exception handling. EXCEPTION_HANDLER_IMPL(COMPlusFrameHandler) { WRAPPER_NO_CONTRACT; _ASSERTE(!DebugIsEECxxException(pExceptionRecord) && "EE C++ Exception leaked into managed code!"); STRESS_LOG5(LF_EH, LL_INFO100, "In COMPlusFrameHander EH code = %x flag = %x EIP = %x with ESP = %x, pEstablisherFrame = 0x%p\n", pExceptionRecord->ExceptionCode, pExceptionRecord->ExceptionFlags, pContext ? GetIP(pContext) : 0, pContext ? GetSP(pContext) : 0, pEstablisherFrame); _ASSERTE((pContext == NULL) || ((pContext->ContextFlags & CONTEXT_CONTROL) == CONTEXT_CONTROL)); if (g_fNoExceptions) return ExceptionContinueSearch; // No EH during EE shutdown. // Check if the exception represents a GCStress Marker. If it does, // we shouldnt record its entry in the TLS as such exceptions are // continuable and can confuse the VM to treat them as CSE, // as they are implemented using illegal instruction exception. bool fIsGCMarker = false; #ifdef HAVE_GCCOVER // This is a debug only macro if (GCStress<cfg_instr_jit>::IsEnabled()) { // TlsGetValue trashes last error. When Complus_GCStress=4, GC is invoked // on every allowable JITed instruction by means of our exception handling machanism // it is very easy to trash the last error. For example, a p/invoke called a native method // which sets last error. Before we getting the last error in the IL stub, it is trashed here DWORD dwLastError = GetLastError(); fIsGCMarker = IsGcMarker(pContext, pExceptionRecord); if (!fIsGCMarker) { SaveCurrentExceptionInfo(pExceptionRecord, pContext); } SetLastError(dwLastError); } else #endif { // GCStress does not exist on retail builds (see IsGcMarker implementation for details). SaveCurrentExceptionInfo(pExceptionRecord, pContext); } if (fIsGCMarker) { // If this was a GCStress marker exception, then return // ExceptionContinueExecution to the OS. return ExceptionContinueExecution; } EXCEPTION_DISPOSITION retVal = ExceptionContinueSearch; Thread *pThread = GetThread(); if ((pExceptionRecord->ExceptionFlags & (EXCEPTION_UNWINDING | EXCEPTION_EXIT_UNWIND)) == 0) { if (pExceptionRecord->ExceptionCode == STATUS_STACK_OVERFLOW) { EEPolicy::HandleStackOverflow(); // VC's unhandled exception filter plays with stack. It VirtualAlloc's a new stack, and // then launch Watson from the new stack. When Watson asks CLR to save required data, we // are not able to walk the stack. // Setting Context in ExInfo so that our Watson dump routine knows how to walk this stack. ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo); pExInfo->m_pContext = pContext; // Save the reference to the topmost handler we see during first pass when an SO goes past us. 
// When an unwind gets triggered for the exception, we will reset the frame chain when we reach // the topmost handler we saw during the first pass. // // This unifies, behaviour-wise, 32bit with 64bit. if ((pExInfo->m_pTopMostHandlerDuringSO == NULL) || (pEstablisherFrame > pExInfo->m_pTopMostHandlerDuringSO)) { pExInfo->m_pTopMostHandlerDuringSO = pEstablisherFrame; } // Switch to preemp mode since we are returning back to the OS. // We will do the quick switch since we are short of stack FastInterlockAnd (&pThread->m_fPreemptiveGCDisabled, 0); return ExceptionContinueSearch; } } else { DWORD exceptionCode = pExceptionRecord->ExceptionCode; if (exceptionCode == STATUS_UNWIND) { // If exceptionCode is STATUS_UNWIND, RtlUnwind is called with a NULL ExceptionRecord, // therefore OS uses a faked ExceptionRecord with STATUS_UNWIND code. Then we need to // look at our saved exception code. exceptionCode = GetCurrentExceptionCode(); } if (exceptionCode == STATUS_STACK_OVERFLOW) { // We saved the context during the first pass in case the stack overflow exception is // unhandled and Watson dump code needs it. Now we are in the second pass, therefore // either the exception is handled by user code, or we have finished unhandled exception // filter process, and the OS is unwinding the stack. Either way, we don't need the // context any more. It is very important to reset the context so that our code does not // accidentally walk the frame using the dangling context in ExInfoWalker::WalkToPosition. ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo); pExInfo->m_pContext = NULL; // We should have the reference to the topmost handler seen during the first pass of SO _ASSERTE(pExInfo->m_pTopMostHandlerDuringSO != NULL); // Reset frame chain till we reach the topmost establisher frame we saw in the first pass. // This will ensure that if any intermediary frame calls back into managed (e.g. native frame // containing a __finally that reverse pinvokes into managed), then we have the correct // explicit frame on the stack. Resetting the frame chain only when we reach the topmost // personality routine seen in the first pass may not result in expected behaviour, // specially during stack walks when crawl frame needs to be initialized from // explicit frame. if (pEstablisherFrame <= pExInfo->m_pTopMostHandlerDuringSO) { GCX_COOP_NO_DTOR(); if (pThread->GetFrame() < GetCurrFrame(pEstablisherFrame)) { // We are very short of stack. We avoid calling UnwindFrame which may // run unknown code here. pThread->SetFrame(GetCurrFrame(pEstablisherFrame)); } } // Switch to preemp mode since we are returning back to the OS. // We will do the quick switch since we are short of stack FastInterlockAnd(&pThread->m_fPreemptiveGCDisabled, 0); return ExceptionContinueSearch; } } if (pExceptionRecord->ExceptionFlags & (EXCEPTION_UNWINDING | EXCEPTION_EXIT_UNWIND)) { retVal = CPFH_UnwindHandler(pExceptionRecord, pEstablisherFrame, pContext, pDispatcherContext); } else { /* Make no assumptions about the current machine state. <TODO>@PERF: Only needs to be called by the very first handler invoked by SEH </TODO>*/ ResetCurrentContext(); retVal = CPFH_FirstPassHandler(pExceptionRecord, pEstablisherFrame, pContext, pDispatcherContext); } return retVal; } // COMPlusFrameHandler() //------------------------------------------------------------------------- // This is called by the EE to restore the stack pointer if necessary. 
//------------------------------------------------------------------------- // This can't be inlined into the caller to avoid introducing EH frame NOINLINE LPVOID COMPlusEndCatchWorker(Thread * pThread) { STATIC_CONTRACT_THROWS; STATIC_CONTRACT_GC_TRIGGERS; STATIC_CONTRACT_MODE_COOPERATIVE; LOG((LF_EH, LL_INFO1000, "COMPlusPEndCatch:called with " "pThread:0x%x\n",pThread)); // indicate that we are out of the managed clause as early as possible ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo); pExInfo->m_EHClauseInfo.SetManagedCodeEntered(FALSE); void* esp = NULL; // Notify the profiler that the catcher has finished running // IL stubs don't contain catch blocks so inability to perform this check does not matter. // if (!pFunc->IsILStub()) EEToProfilerExceptionInterfaceWrapper::ExceptionCatcherLeave(); // no need to set pExInfo->m_ClauseType = (DWORD)COR_PRF_CLAUSE_NONE now that the // notification is done because because the ExInfo record is about to be popped off anyway LOG((LF_EH, LL_INFO1000, "COMPlusPEndCatch:pThread:0x%x\n",pThread)); #ifdef _DEBUG gLastResumedExceptionFunc = NULL; gLastResumedExceptionHandler = 0; #endif // Set the thrown object to NULL as no longer needed. This also sets the last thrown object to NULL. pThread->SafeSetThrowables(NULL); // reset the stashed exception info pExInfo->m_pExceptionRecord = NULL; pExInfo->m_pContext = NULL; pExInfo->m_pExceptionPointers = NULL; if (pExInfo->m_pShadowSP) { *pExInfo->m_pShadowSP = 0; // Reset the shadow SP } // pExInfo->m_dEsp was set in ResumeAtJITEH(). It is the Esp of the // handler nesting level which catches the exception. esp = (void*)(size_t)pExInfo->m_dEsp; pExInfo->UnwindExInfo(esp); // Prepare to sync managed exception state // // In a case when we're nested inside another catch block, the domain in which we're executing may not be the // same as the one the domain of the throwable that was just made the current throwable above. Therefore, we // make a special effort to preserve the domain of the throwable as we update the the last thrown object. // // This function (COMPlusEndCatch) can also be called by the in-proc debugger helper thread on x86 when // an attempt to SetIP takes place to set IP outside the catch clause. In such a case, managed thread object // will not be available. Thus, we should reset the severity only if its not such a thread. // // This behaviour (of debugger doing SetIP) is not allowed on 64bit since the catch clauses are implemented // as a seperate funclet and it's just not allowed to set the IP across EH scopes, such as from inside a catch // clause to outside of the catch clause. bool fIsDebuggerHelperThread = (g_pDebugInterface == NULL) ? false : g_pDebugInterface->ThisIsHelperThread(); // Sync managed exception state, for the managed thread, based upon any active exception tracker pThread->SyncManagedExceptionState(fIsDebuggerHelperThread); LOG((LF_EH, LL_INFO1000, "COMPlusPEndCatch: esp=%p\n", esp)); return esp; } // // This function works in conjunction with JIT_EndCatch. On input, the parameters are set as follows: // ebp, ebx, edi, esi: the values of these registers at the end of the catch block // *pRetAddress: the next instruction after the call to JIT_EndCatch // // On output, *pRetAddress is the instruction at which to resume execution. This may be user code, // or it may be ThrowControlForThread (which will re-raise a pending ThreadAbortException). // // Returns the esp to set before resuming at *pRetAddress. 
// LPVOID STDCALL COMPlusEndCatch(LPVOID ebp, DWORD ebx, DWORD edi, DWORD esi, LPVOID* pRetAddress) { // // PopNestedExceptionRecords directly manipulates fs:[0] chain. This method can't have any EH! // STATIC_CONTRACT_THROWS; STATIC_CONTRACT_GC_TRIGGERS; STATIC_CONTRACT_MODE_COOPERATIVE; ETW::ExceptionLog::ExceptionCatchEnd(); ETW::ExceptionLog::ExceptionThrownEnd(); void* esp = COMPlusEndCatchWorker(GetThread()); // We are going to resume at a handler nesting level whose esp is dEsp. Pop off any SEH records below it. This // would be the COMPlusNestedExceptionHandler we had inserted. PopNestedExceptionRecords(esp); // // Set up m_OSContext for the call to COMPlusCheckForAbort // Thread* pThread = GetThread(); SetIP(pThread->m_OSContext, (PCODE)*pRetAddress); SetSP(pThread->m_OSContext, (TADDR)esp); SetFP(pThread->m_OSContext, (TADDR)ebp); pThread->m_OSContext->Ebx = ebx; pThread->m_OSContext->Edi = edi; pThread->m_OSContext->Esi = esi; LPVOID throwControl = COMPlusCheckForAbort((UINT_PTR)*pRetAddress); if (throwControl) *pRetAddress = throwControl; return esp; } PEXCEPTION_REGISTRATION_RECORD GetCurrentSEHRecord() { WRAPPER_NO_CONTRACT; LPVOID fs0 = (LPVOID)__readfsdword(0); #if 0 // This walk is too expensive considering we hit it every time we a CONTRACT(NOTHROW) #ifdef _DEBUG EXCEPTION_REGISTRATION_RECORD *pEHR = (EXCEPTION_REGISTRATION_RECORD *)fs0; LPVOID spVal; __asm { mov spVal, esp } // check that all the eh frames are all greater than the current stack value. If not, the // stack has been updated somehow w/o unwinding the SEH chain. // LOG((LF_EH, LL_INFO1000000, "ER Chain:\n")); while (pEHR != NULL && pEHR != EXCEPTION_CHAIN_END) { // LOG((LF_EH, LL_INFO1000000, "\tp: prev:p handler:%x\n", pEHR, pEHR->Next, pEHR->Handler)); if (pEHR < spVal) { if (gLastResumedExceptionFunc != 0) _ASSERTE(!"Stack is greater than start of SEH chain - possible missing leave in handler. 
See gLastResumedExceptionHandler & gLastResumedExceptionFunc for info"); else _ASSERTE(!"Stack is greater than start of SEH chain (FS:0)"); } if (pEHR->Handler == (void *)-1) _ASSERTE(!"Handler value has been corrupted"); _ASSERTE(pEHR < pEHR->Next); pEHR = pEHR->Next; } #endif #endif // 0 return (EXCEPTION_REGISTRATION_RECORD*) fs0; } PEXCEPTION_REGISTRATION_RECORD GetFirstCOMPlusSEHRecord(Thread *pThread) { WRAPPER_NO_CONTRACT; EXCEPTION_REGISTRATION_RECORD *pEHR = *(pThread->GetExceptionListPtr()); if (pEHR == EXCEPTION_CHAIN_END || IsUnmanagedToManagedSEHHandler(pEHR)) { return pEHR; } else { return GetNextCOMPlusSEHRecord(pEHR); } } PEXCEPTION_REGISTRATION_RECORD GetPrevSEHRecord(EXCEPTION_REGISTRATION_RECORD *next) { WRAPPER_NO_CONTRACT; _ASSERTE(IsUnmanagedToManagedSEHHandler(next)); EXCEPTION_REGISTRATION_RECORD *pEHR = GetCurrentSEHRecord(); _ASSERTE(pEHR != 0 && pEHR != EXCEPTION_CHAIN_END); EXCEPTION_REGISTRATION_RECORD *pBest = 0; while (pEHR != next) { if (IsUnmanagedToManagedSEHHandler(pEHR)) pBest = pEHR; pEHR = pEHR->Next; _ASSERTE(pEHR != 0 && pEHR != EXCEPTION_CHAIN_END); } return pBest; } VOID SetCurrentSEHRecord(EXCEPTION_REGISTRATION_RECORD *pSEH) { WRAPPER_NO_CONTRACT; *GetThread()->GetExceptionListPtr() = pSEH; } // Note that this logic is copied below, in PopSEHRecords __declspec(naked) VOID __cdecl PopSEHRecords(LPVOID pTargetSP) { // No CONTRACT possible on naked functions STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; __asm{ mov ecx, [esp+4] ;; ecx <- pTargetSP mov eax, fs:[0] ;; get current SEH record poploop: cmp eax, ecx jge done mov eax, [eax] ;; get next SEH record jmp poploop done: mov fs:[0], eax retn } } // // Unwind pExinfo, pops FS:[0] handlers until the interception context SP, and // resumes at interception context. // VOID UnwindExceptionTrackerAndResumeInInterceptionFrame(ExInfo* pExInfo, EHContext* context) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_MODE_COOPERATIVE; _ASSERTE(pExInfo && context); pExInfo->UnwindExInfo((LPVOID)(size_t)context->Esp); PopNestedExceptionRecords((LPVOID)(size_t)context->Esp); STRESS_LOG3(LF_EH|LF_CORDB, LL_INFO100, "UnwindExceptionTrackerAndResumeInInterceptionFrame: completing intercept at EIP = %p ESP = %p EBP = %p\n", context->Eip, context->Esp, context->Ebp); ResumeAtJitEHHelper(context); UNREACHABLE_MSG("Should never return from ResumeAtJitEHHelper!"); } // // Pop SEH records below the given target ESP. This is only used to pop nested exception records. // If bCheckForUnknownHandlers is set, it only checks for unknown FS:[0] handlers. // BOOL PopNestedExceptionRecords(LPVOID pTargetSP, BOOL bCheckForUnknownHandlers) { // No CONTRACT here, because we can't run the risk of it pushing any SEH into the current method. STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; PEXCEPTION_REGISTRATION_RECORD pEHR = GetCurrentSEHRecord(); while ((LPVOID)pEHR < pTargetSP) { // // The only handler types we're allowed to have below the limit on the FS:0 chain in these cases are a // nested exception record or a fast NExport record, so we verify that here. // // There is a special case, of course: for an unhandled exception, when the default handler does the exit // unwind, we may have an exception that escapes a finally clause, thus replacing the original unhandled // exception. If we find a catcher for that new exception, then we'll go ahead and do our own unwind, then // jump to the catch. 
When we are called here, just before jumpping to the catch, we'll pop off our nested // handlers, then we'll pop off one more handler: the handler that ntdll!ExecuteHandler2 pushed before // calling our nested handler. We go ahead and pop off that handler, too. Its okay, its only there to catch // exceptions from handlers and turn them into collided unwind status codes... there's no cleanup in the // handler that we're removing, and that's the important point. The handler that ExecuteHandler2 pushes // isn't a public export from ntdll, but its named "UnwindHandler" and is physically shortly after // ExecuteHandler2 in ntdll. // In this case, we don't want to pop off the NExportSEH handler since it's our outermost handler. // static HINSTANCE ExecuteHandler2Module = 0; static BOOL ExecuteHandler2ModuleInited = FALSE; // Cache the handle to the dll with the handler pushed by ExecuteHandler2. if (!ExecuteHandler2ModuleInited) { ExecuteHandler2Module = WszGetModuleHandle(W("ntdll.dll")); ExecuteHandler2ModuleInited = TRUE; } if (bCheckForUnknownHandlers) { if (!IsComPlusNestedExceptionRecord(pEHR) || !((ExecuteHandler2Module != NULL) && IsIPInModule(ExecuteHandler2Module, (PCODE)pEHR->Handler))) { return TRUE; } } #ifdef _DEBUG else { // Note: if we can't find the module containing ExecuteHandler2, we'll just be really strict and require // that we're only popping nested handlers or the FastNExportSEH handler. _ASSERTE(FastNExportSEH(pEHR) || IsComPlusNestedExceptionRecord(pEHR) || ((ExecuteHandler2Module != NULL) && IsIPInModule(ExecuteHandler2Module, (PCODE)pEHR->Handler))); } #endif // _DEBUG pEHR = pEHR->Next; } if (!bCheckForUnknownHandlers) { SetCurrentSEHRecord(pEHR); } return FALSE; } // // This is implemented differently from the PopNestedExceptionRecords above because it's called in the context of // the DebuggerRCThread to operate on the stack of another thread. // VOID PopNestedExceptionRecords(LPVOID pTargetSP, CONTEXT *pCtx, void *pSEH) { // No CONTRACT here, because we can't run the risk of it pushing any SEH into the current method. STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; #ifdef _DEBUG LOG((LF_CORDB,LL_INFO1000, "\nPrintSEHRecords:\n")); EXCEPTION_REGISTRATION_RECORD *pEHR = (EXCEPTION_REGISTRATION_RECORD *)(size_t)*(DWORD *)pSEH; // check that all the eh frames are all greater than the current stack value. If not, the // stack has been updated somehow w/o unwinding the SEH chain. while (pEHR != NULL && pEHR != EXCEPTION_CHAIN_END) { LOG((LF_EH, LL_INFO1000000, "\t%08x: next:%08x handler:%x\n", pEHR, pEHR->Next, pEHR->Handler)); pEHR = pEHR->Next; } #endif DWORD dwCur = *(DWORD*)pSEH; // 'EAX' in the original routine DWORD dwPrev = (DWORD)(size_t)pSEH; while (dwCur < (DWORD)(size_t)pTargetSP) { // Watch for the OS handler // for nested exceptions, or any C++ handlers for destructors in our call // stack, or anything else. if (dwCur < (DWORD)GetSP(pCtx)) dwPrev = dwCur; dwCur = *(DWORD *)(size_t)dwCur; LOG((LF_CORDB,LL_INFO10000, "dwCur: 0x%x dwPrev:0x%x pTargetSP:0x%x\n", dwCur, dwPrev, pTargetSP)); } *(DWORD *)(size_t)dwPrev = dwCur; #ifdef _DEBUG pEHR = (EXCEPTION_REGISTRATION_RECORD *)(size_t)*(DWORD *)pSEH; // check that all the eh frames are all greater than the current stack value. If not, the // stack has been updated somehow w/o unwinding the SEH chain. 
LOG((LF_CORDB,LL_INFO1000, "\nPopSEHRecords:\n")); while (pEHR != NULL && pEHR != (void *)-1) { LOG((LF_EH, LL_INFO1000000, "\t%08x: next:%08x handler:%x\n", pEHR, pEHR->Next, pEHR->Handler)); pEHR = pEHR->Next; } #endif } //========================================================================== // COMPlusThrowCallback // //========================================================================== /* * * COMPlusThrowCallbackHelper * * This function is a simple helper function for COMPlusThrowCallback. It is needed * because of the EX_TRY macro. This macro does an alloca(), which allocates space * off the stack, not free'ing it. Thus, doing a EX_TRY in a loop can easily result * in a stack overflow error. By factoring out the EX_TRY into a separate function, * we recover that stack space. * * Parameters: * pJitManager - The JIT manager that will filter the EH. * pCf - The frame to crawl. * EHClausePtr * nestingLevel * pThread - Used to determine if the thread is throwable or not. * * Return: * Exception status. * */ int COMPlusThrowCallbackHelper(IJitManager *pJitManager, CrawlFrame *pCf, ThrowCallbackType* pData, EE_ILEXCEPTION_CLAUSE *EHClausePtr, DWORD nestingLevel, OBJECTREF throwable, Thread *pThread ) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; int iFilt = 0; EX_TRY { GCPROTECT_BEGIN (throwable); // We want to call filters even if the thread is aborting, so suppress abort // checks while the filter runs. ThreadPreventAsyncHolder preventAbort; BYTE* startAddress = (BYTE*)pCf->GetCodeInfo()->GetStartAddress(); iFilt = ::CallJitEHFilter(pCf, startAddress, EHClausePtr, nestingLevel, throwable); GCPROTECT_END(); } EX_CATCH { // We had an exception in filter invocation that remained unhandled. // Sync managed exception state, for the managed thread, based upon the active exception tracker. pThread->SyncManagedExceptionState(false); // // Swallow exception. Treat as exception continue search. // iFilt = EXCEPTION_CONTINUE_SEARCH; } EX_END_CATCH(SwallowAllExceptions) return iFilt; } //****************************************************************************** // The stack walk callback for exception handling on x86. // Returns one of: // SWA_CONTINUE = 0, // continue walking // SWA_ABORT = 1, // stop walking, early out in "failure case" // SWA_FAILED = 2 // couldn't walk stack StackWalkAction COMPlusThrowCallback( // SWA value CrawlFrame *pCf, // Data from StackWalkFramesEx ThrowCallbackType *pData) // Context data passed through from CPFH { // We don't want to use a runtime contract here since this codepath is used during // the processing of a hard SO. Contracts use a significant amount of stack // which we can't afford for those cases. 
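    // This callback is invoked once per managed frame during the first (search) pass. For each frame it
    // appends an element to the managed stack trace, delivers the debugger/profiler and first-chance
    // notifications, walks the method's EH clause table, evaluates any filters, and, once a matching
    // typed handler or an accepting filter is found, records the method, clause index and ESP in the
    // ThrowCallbackType so that the unwind pass (COMPlusUnwindCallback) can resume there.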
STATIC_CONTRACT_THROWS; STATIC_CONTRACT_GC_TRIGGERS; STATIC_CONTRACT_MODE_COOPERATIVE; Frame *pFrame = pCf->GetFrame(); MethodDesc *pFunc = pCf->GetFunction(); #if defined(_DEBUG) #define METHODNAME(pFunc) (pFunc?pFunc->m_pszDebugMethodName:"<n/a>") #else #define METHODNAME(pFunc) "<n/a>" #endif STRESS_LOG4(LF_EH, LL_INFO100, "COMPlusThrowCallback: STACKCRAWL method:%pM ('%s'), Frame:%p, FrameVtable = %pV\n", pFunc, METHODNAME(pFunc), pFrame, pCf->IsFrameless()?0:(*(void**)pFrame)); #undef METHODNAME Thread *pThread = GetThread(); if (pFrame && pData->pTopFrame == pFrame) /* Don't look past limiting frame if there is one */ return SWA_ABORT; if (!pFunc) return SWA_CONTINUE; if (pThread->IsRudeAbortInitiated()) { return SWA_CONTINUE; } ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo); _ASSERTE(!pData->bIsUnwind); #ifdef _DEBUG // It SHOULD be the case that any frames we consider live between this exception // record and the previous one. if (!pExInfo->m_pPrevNestedInfo) { if (pData->pCurrentExceptionRecord) { if (pFrame) _ASSERTE(pData->pCurrentExceptionRecord > pFrame); // The FastNExport SEH handler can be in the frame we just unwound and as a result just out of range. if (pCf->IsFrameless() && !FastNExportSEH((PEXCEPTION_REGISTRATION_RECORD)pData->pCurrentExceptionRecord)) { _ASSERTE((ULONG_PTR)pData->pCurrentExceptionRecord >= GetRegdisplaySP(pCf->GetRegisterSet())); } } if (pData->pPrevExceptionRecord) { // FCALLS have an extra SEH record in debug because of the desctructor // associated with ForbidGC checking. This is benign, so just ignore it. if (pFrame) _ASSERTE(pData->pPrevExceptionRecord < pFrame || pFrame->GetVTablePtr() == HelperMethodFrame::GetMethodFrameVPtr()); if (pCf->IsFrameless()) _ASSERTE((ULONG_PTR)pData->pPrevExceptionRecord <= GetRegdisplaySP(pCf->GetRegisterSet())); } } #endif UINT_PTR currentIP = 0; UINT_PTR currentSP = 0; if (pCf->IsFrameless()) { currentIP = (UINT_PTR)GetControlPC(pCf->GetRegisterSet()); currentSP = (UINT_PTR)GetRegdisplaySP(pCf->GetRegisterSet()); } else if (InlinedCallFrame::FrameHasActiveCall(pFrame)) { // don't have the IP, SP for native code currentIP = 0; currentSP = 0; } else { currentIP = (UINT_PTR)(pCf->GetFrame()->GetIP()); currentSP = 0; //Don't have an SP to get. } if (!pFunc->IsILStub()) { // Append the current frame to the stack trace and save the save trace to the managed Exception object. pExInfo->m_StackTraceInfo.AppendElement(pData->bAllowAllocMem, currentIP, currentSP, pFunc, pCf); pExInfo->m_StackTraceInfo.SaveStackTrace(pData->bAllowAllocMem, pThread->GetThrowableAsHandle(), pData->bReplaceStack, pData->bSkipLastElement); } else { LOG((LF_EH, LL_INFO1000, "COMPlusThrowCallback: Skipping AppendElement/SaveStackTrace for IL stub MD %p\n", pFunc)); } // Fire an exception thrown ETW event when an exception occurs ETW::ExceptionLog::ExceptionThrown(pCf, pData->bSkipLastElement, pData->bReplaceStack); // Reset the flags. These flags are set only once before each stack walk done by LookForHandler(), and // they apply only to the first frame we append to the stack trace. Subsequent frames are always appended. if (pData->bReplaceStack) { pData->bReplaceStack = FALSE; } if (pData->bSkipLastElement) { pData->bSkipLastElement = FALSE; } // now we've got the stack trace, if we aren't allowed to catch this and we're first pass, return if (pData->bDontCatch) return SWA_CONTINUE; if (!pCf->IsFrameless()) { // @todo - remove this once SIS is fully enabled. 
extern bool g_EnableSIS; if (g_EnableSIS) { // For debugger, we may want to notify 1st chance exceptions if they're coming out of a stub. // We recognize stubs as Frames with a M2U transition type. The debugger's stackwalker also // recognizes these frames and publishes ICorDebugInternalFrames in the stackwalk. It's // important to use pFrame as the stack address so that the Exception callback matches up // w/ the ICorDebugInternlFrame stack range. if (CORDebuggerAttached()) { Frame * pFrameStub = pCf->GetFrame(); Frame::ETransitionType t = pFrameStub->GetTransitionType(); if (t == Frame::TT_M2U) { // Use address of the frame as the stack address. currentSP = (SIZE_T) ((void*) pFrameStub); currentIP = 0; // no IP. EEToDebuggerExceptionInterfaceWrapper::FirstChanceManagedException(pThread, (SIZE_T)currentIP, (SIZE_T)currentSP); // Deliver the FirstChanceNotification after the debugger, if not already delivered. if (!pExInfo->DeliveredFirstChanceNotification()) { ExceptionNotifications::DeliverFirstChanceNotification(); } } } } return SWA_CONTINUE; } bool fIsILStub = pFunc->IsILStub(); bool fGiveDebuggerAndProfilerNotification = !fIsILStub; BOOL fMethodCanHandleException = TRUE; MethodDesc * pUserMDForILStub = NULL; Frame * pILStubFrame = NULL; if (fIsILStub) pUserMDForILStub = GetUserMethodForILStub(pThread, currentSP, pFunc, &pILStubFrame); // Let the profiler know that we are searching for a handler within this function instance if (fGiveDebuggerAndProfilerNotification) EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFunctionEnter(pFunc); // The following debugger notification and AppDomain::FirstChanceNotification should be scoped together // since the AD notification *must* follow immediately after the debugger's notification. { #ifdef DEBUGGING_SUPPORTED // // Go ahead and notify any debugger of this exception. // EEToDebuggerExceptionInterfaceWrapper::FirstChanceManagedException(pThread, (SIZE_T)currentIP, (SIZE_T)currentSP); if (CORDebuggerAttached() && pExInfo->m_ExceptionFlags.DebuggerInterceptInfo()) { return SWA_ABORT; } #endif // DEBUGGING_SUPPORTED // Attempt to deliver the first chance notification to the AD only *AFTER* the debugger // has done that, provided we have not already done that. if (!pExInfo->DeliveredFirstChanceNotification()) { ExceptionNotifications::DeliverFirstChanceNotification(); } } IJitManager* pJitManager = pCf->GetJitManager(); _ASSERTE(pJitManager); EH_CLAUSE_ENUMERATOR pEnumState; unsigned EHCount = pJitManager->InitializeEHEnumeration(pCf->GetMethodToken(), &pEnumState); if (EHCount == 0) { // Inform the profiler that we're leaving, and what pass we're on if (fGiveDebuggerAndProfilerNotification) EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFunctionLeave(pFunc); return SWA_CONTINUE; } TypeHandle thrownType = TypeHandle(); // if we are being called on an unwind for an exception that we did not try to catch, eg. 
// an internal EE exception, then pThread->GetThrowable will be null { OBJECTREF throwable = pThread->GetThrowable(); if (throwable != NULL) { throwable = PossiblyUnwrapThrowable(throwable, pCf->GetAssembly()); thrownType = TypeHandle(throwable->GetMethodTable()); } } PREGDISPLAY regs = pCf->GetRegisterSet(); BYTE *pStack = (BYTE *) GetRegdisplaySP(regs); #ifdef DEBUGGING_SUPPORTED BYTE *pHandlerEBP = (BYTE *) GetRegdisplayFP(regs); #endif DWORD offs = (DWORD)pCf->GetRelOffset(); //= (BYTE*) (*regs->pPC) - (BYTE*) pCf->GetStartAddress(); STRESS_LOG1(LF_EH, LL_INFO10000, "COMPlusThrowCallback: offset is %d\n", offs); EE_ILEXCEPTION_CLAUSE EHClause; unsigned start_adjust, end_adjust; start_adjust = !(pCf->HasFaulted() || pCf->IsIPadjusted()); end_adjust = pCf->IsActiveFunc(); for(ULONG i=0; i < EHCount; i++) { pJitManager->GetNextEHClause(&pEnumState, &EHClause); _ASSERTE(IsValidClause(&EHClause)); STRESS_LOG4(LF_EH, LL_INFO100, "COMPlusThrowCallback: considering '%s' clause [%d,%d], ofs:%d\n", (IsFault(&EHClause) ? "fault" : ( IsFinally(&EHClause) ? "finally" : ( IsFilterHandler(&EHClause) ? "filter" : ( IsTypedHandler(&EHClause) ? "typed" : "unknown")))), EHClause.TryStartPC, EHClause.TryEndPC, offs ); // Checking the exception range is a bit tricky because // on CPU faults (null pointer access, div 0, ..., the IP points // to the faulting instruction, but on calls, the IP points // to the next instruction. // This means that we should not include the start point on calls // as this would be a call just preceding the try block. // Also, we should include the end point on calls, but not faults. // If we're in the FILTER part of a filter clause, then we // want to stop crawling. It's going to be caught in a // EX_CATCH just above us. If not, the exception if ( IsFilterHandler(&EHClause) && ( offs > EHClause.FilterOffset || (offs == EHClause.FilterOffset && !start_adjust) ) && ( offs < EHClause.HandlerStartPC || (offs == EHClause.HandlerStartPC && !end_adjust) )) { STRESS_LOG4(LF_EH, LL_INFO100, "COMPlusThrowCallback: Fault inside filter [%d,%d] startAdj %d endAdj %d\n", EHClause.FilterOffset, EHClause.HandlerStartPC, start_adjust, end_adjust); if (fGiveDebuggerAndProfilerNotification) EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFunctionLeave(pFunc); return SWA_ABORT; } if ( (offs < EHClause.TryStartPC) || (offs > EHClause.TryEndPC) || (offs == EHClause.TryStartPC && start_adjust) || (offs == EHClause.TryEndPC && end_adjust)) continue; BOOL typeMatch = FALSE; BOOL isTypedHandler = IsTypedHandler(&EHClause); if (isTypedHandler && !thrownType.IsNull()) { if (EHClause.TypeHandle == (void*)(size_t)mdTypeRefNil) { // this is a catch(...) typeMatch = TRUE; } else { TypeHandle exnType = pJitManager->ResolveEHClause(&EHClause,pCf); // if doesn't have cached class then class wasn't loaded so couldn't have been thrown typeMatch = !exnType.IsNull() && ExceptionIsOfRightType(exnType, thrownType); } } // <TODO>@PERF: Is this too expensive? Consider storing the nesting level // instead of the HandlerEndPC.</TODO> // Determine the nesting level of EHClause. Just walk the table // again, and find out how many handlers enclose it DWORD nestingLevel = 0; if (IsFaultOrFinally(&EHClause)) continue; if (isTypedHandler) { LOG((LF_EH, LL_INFO100, "COMPlusThrowCallback: %s match for typed handler.\n", typeMatch?"Found":"Did not find")); if (!typeMatch) { continue; } } else { // Must be an exception filter (__except() part of __try{}__except(){}). 
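            // The filter is invoked right here, during the first pass, via COMPlusThrowCallbackHelper;
            // only a return value of EXCEPTION_EXECUTE_HANDLER selects this clause, any other result
            // keeps the search going.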
nestingLevel = ComputeEnclosingHandlerNestingLevel(pJitManager, pCf->GetMethodToken(), EHClause.HandlerStartPC); // We just need *any* address within the method. This will let the debugger // resolve the EnC version of the method. PCODE pMethodAddr = GetControlPC(regs); if (fGiveDebuggerAndProfilerNotification) EEToDebuggerExceptionInterfaceWrapper::ExceptionFilter(pFunc, pMethodAddr, EHClause.FilterOffset, pHandlerEBP); UINT_PTR uStartAddress = (UINT_PTR)pCf->GetCodeInfo()->GetStartAddress(); // save clause information in the exinfo pExInfo->m_EHClauseInfo.SetInfo(COR_PRF_CLAUSE_FILTER, uStartAddress + EHClause.FilterOffset, StackFrame((UINT_PTR)pHandlerEBP)); // Let the profiler know we are entering a filter if (fGiveDebuggerAndProfilerNotification) EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFilterEnter(pFunc); STRESS_LOG3(LF_EH, LL_INFO10, "COMPlusThrowCallback: calling filter code, EHClausePtr:%08x, Start:%08x, End:%08x\n", &EHClause, EHClause.HandlerStartPC, EHClause.HandlerEndPC); OBJECTREF throwable = PossiblyUnwrapThrowable(pThread->GetThrowable(), pCf->GetAssembly()); pExInfo->m_EHClauseInfo.SetManagedCodeEntered(TRUE); int iFilt = COMPlusThrowCallbackHelper(pJitManager, pCf, pData, &EHClause, nestingLevel, throwable, pThread); pExInfo->m_EHClauseInfo.SetManagedCodeEntered(FALSE); // Let the profiler know we are leaving a filter if (fGiveDebuggerAndProfilerNotification) EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFilterLeave(); pExInfo->m_EHClauseInfo.ResetInfo(); if (pThread->IsRudeAbortInitiated()) { if (fGiveDebuggerAndProfilerNotification) EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFunctionLeave(pFunc); return SWA_CONTINUE; } // If this filter didn't want the exception, keep looking. if (EXCEPTION_EXECUTE_HANDLER != iFilt) continue; } // Record this location, to stop the unwind phase, later. pData->pFunc = pFunc; pData->dHandler = i; pData->pStack = pStack; // Notify the profiler that a catcher has been found if (fGiveDebuggerAndProfilerNotification) { EEToProfilerExceptionInterfaceWrapper::ExceptionSearchCatcherFound(pFunc); EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFunctionLeave(pFunc); } #ifdef DEBUGGING_SUPPORTED // // Notify debugger that a catcher has been found. // if (fIsILStub) { EEToDebuggerExceptionInterfaceWrapper::NotifyOfCHFFilter(pExInfo->m_pExceptionPointers, pILStubFrame); } else if (fGiveDebuggerAndProfilerNotification && CORDebuggerAttached() && !pExInfo->m_ExceptionFlags.DebuggerInterceptInfo()) { _ASSERTE(pData); // We just need *any* address within the method. This will let the debugger // resolve the EnC version of the method. PCODE pMethodAddr = GetControlPC(regs); EEToDebuggerExceptionInterfaceWrapper::FirstChanceManagedExceptionCatcherFound(pThread, pData->pFunc, pMethodAddr, (SIZE_T)pData->pStack, &EHClause); } #endif // DEBUGGING_SUPPORTED return SWA_ABORT; } if (fGiveDebuggerAndProfilerNotification) EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFunctionLeave(pFunc); return SWA_CONTINUE; } // StackWalkAction COMPlusThrowCallback() //========================================================================== // COMPlusUnwindCallback //========================================================================== #if defined(_MSC_VER) #pragma warning(push) #pragma warning (disable : 4740) // There is inline asm code in this function, which disables // global optimizations. 
#pragma warning (disable : 4731) #endif StackWalkAction COMPlusUnwindCallback (CrawlFrame *pCf, ThrowCallbackType *pData) { STATIC_CONTRACT_THROWS; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_MODE_COOPERATIVE; _ASSERTE(pData->bIsUnwind); Frame *pFrame = pCf->GetFrame(); MethodDesc *pFunc = pCf->GetFunction(); #if defined(_DEBUG) #define METHODNAME(pFunc) (pFunc?pFunc->m_pszDebugMethodName:"<n/a>") #else #define METHODNAME(pFunc) "<n/a>" #endif STRESS_LOG4(LF_EH, LL_INFO100, "COMPlusUnwindCallback: STACKCRAWL method:%pM ('%s'), Frame:%p, FrameVtable = %pV\n", pFunc, METHODNAME(pFunc), pFrame, pCf->IsFrameless()?0:(*(void**)pFrame)); #undef METHODNAME if (pFrame && pData->pTopFrame == pFrame) /* Don't look past limiting frame if there is one */ return SWA_ABORT; if (!pFunc) return SWA_CONTINUE; if (!pCf->IsFrameless()) return SWA_CONTINUE; Thread *pThread = GetThread(); // If the thread is being RudeAbort, we will not run any finally if (pThread->IsRudeAbortInitiated()) { return SWA_CONTINUE; } IJitManager* pJitManager = pCf->GetJitManager(); _ASSERTE(pJitManager); ExInfo *pExInfo = &(pThread->GetExceptionState()->m_currentExInfo); PREGDISPLAY regs = pCf->GetRegisterSet(); BYTE *pStack = (BYTE *) GetRegdisplaySP(regs); TypeHandle thrownType = TypeHandle(); #ifdef DEBUGGING_SUPPORTED LOG((LF_EH, LL_INFO1000, "COMPlusUnwindCallback: Intercept %d, pData->pFunc 0x%X, pFunc 0x%X, pData->pStack 0x%X, pStack 0x%X\n", pExInfo->m_ExceptionFlags.DebuggerInterceptInfo(), pData->pFunc, pFunc, pData->pStack, pStack)); // // If the debugger wants to intercept this exception here, go do that. // if (pExInfo->m_ExceptionFlags.DebuggerInterceptInfo() && (pData->pFunc == pFunc) && (pData->pStack == pStack)) { goto LDoDebuggerIntercept; } #endif bool fGiveDebuggerAndProfilerNotification; fGiveDebuggerAndProfilerNotification = !pFunc->IsILStub(); // Notify the profiler of the function we're dealing with in the unwind phase if (fGiveDebuggerAndProfilerNotification) EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFunctionEnter(pFunc); EH_CLAUSE_ENUMERATOR pEnumState; unsigned EHCount = pJitManager->InitializeEHEnumeration(pCf->GetMethodToken(), &pEnumState); if (EHCount == 0) { // Inform the profiler that we're leaving, and what pass we're on if (fGiveDebuggerAndProfilerNotification) EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFunctionLeave(pFunc); return SWA_CONTINUE; } // if we are being called on an unwind for an exception that we did not try to catch, eg. // an internal EE exception, then pThread->GetThrowable will be null { OBJECTREF throwable = pThread->GetThrowable(); if (throwable != NULL) { throwable = PossiblyUnwrapThrowable(throwable, pCf->GetAssembly()); thrownType = TypeHandle(throwable->GetMethodTable()); } } #ifdef DEBUGGING_SUPPORTED BYTE *pHandlerEBP; pHandlerEBP = (BYTE *) GetRegdisplayFP(regs); #endif DWORD offs; offs = (DWORD)pCf->GetRelOffset(); //= (BYTE*) (*regs->pPC) - (BYTE*) pCf->GetStartAddress(); LOG((LF_EH, LL_INFO100, "COMPlusUnwindCallback: current EIP offset in method 0x%x, \n", offs)); EE_ILEXCEPTION_CLAUSE EHClause; unsigned start_adjust, end_adjust; start_adjust = !(pCf->HasFaulted() || pCf->IsIPadjusted()); end_adjust = pCf->IsActiveFunc(); for(ULONG i=0; i < EHCount; i++) { pJitManager->GetNextEHClause(&pEnumState, &EHClause); _ASSERTE(IsValidClause(&EHClause)); STRESS_LOG4(LF_EH, LL_INFO100, "COMPlusUnwindCallback: considering '%s' clause [%d,%d], offs:%d\n", (IsFault(&EHClause) ? "fault" : ( IsFinally(&EHClause) ? "finally" : ( IsFilterHandler(&EHClause) ? 
"filter" : ( IsTypedHandler(&EHClause) ? "typed" : "unknown")))), EHClause.TryStartPC, EHClause.TryEndPC, offs ); // Checking the exception range is a bit tricky because // on CPU faults (null pointer access, div 0, ..., the IP points // to the faulting instruction, but on calls, the IP points // to the next instruction. // This means that we should not include the start point on calls // as this would be a call just preceding the try block. // Also, we should include the end point on calls, but not faults. if ( IsFilterHandler(&EHClause) && ( offs > EHClause.FilterOffset || (offs == EHClause.FilterOffset && !start_adjust) ) && ( offs < EHClause.HandlerStartPC || (offs == EHClause.HandlerStartPC && !end_adjust) ) ) { STRESS_LOG4(LF_EH, LL_INFO100, "COMPlusUnwindCallback: Fault inside filter [%d,%d] startAdj %d endAdj %d\n", EHClause.FilterOffset, EHClause.HandlerStartPC, start_adjust, end_adjust); // Make the filter as done. See comment in CallJitEHFilter // on why we have to do it here. Frame* pFilterFrame = pThread->GetFrame(); _ASSERTE(pFilterFrame->GetVTablePtr() == ExceptionFilterFrame::GetMethodFrameVPtr()); ((ExceptionFilterFrame*)pFilterFrame)->SetFilterDone(); // Inform the profiler that we're leaving, and what pass we're on if (fGiveDebuggerAndProfilerNotification) EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFunctionLeave(pFunc); return SWA_ABORT; } if ( (offs < EHClause.TryStartPC) || (offs > EHClause.TryEndPC) || (offs == EHClause.TryStartPC && start_adjust) || (offs == EHClause.TryEndPC && end_adjust)) continue; // <TODO>@PERF : Is this too expensive? Consider storing the nesting level // instead of the HandlerEndPC.</TODO> // Determine the nesting level of EHClause. Just walk the table // again, and find out how many handlers enclose it DWORD nestingLevel = ComputeEnclosingHandlerNestingLevel(pJitManager, pCf->GetMethodToken(), EHClause.HandlerStartPC); // We just need *any* address within the method. This will let the debugger // resolve the EnC version of the method. PCODE pMethodAddr = GetControlPC(regs); UINT_PTR uStartAddress = (UINT_PTR)pCf->GetCodeInfo()->GetStartAddress(); if (IsFaultOrFinally(&EHClause)) { if (fGiveDebuggerAndProfilerNotification) EEToDebuggerExceptionInterfaceWrapper::ExceptionHandle(pFunc, pMethodAddr, EHClause.HandlerStartPC, pHandlerEBP); pExInfo->m_EHClauseInfo.SetInfo(COR_PRF_CLAUSE_FINALLY, uStartAddress + EHClause.HandlerStartPC, StackFrame((UINT_PTR)pHandlerEBP)); // Notify the profiler that we are about to execute the finally code if (fGiveDebuggerAndProfilerNotification) EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFinallyEnter(pFunc); LOG((LF_EH, LL_INFO100, "COMPlusUnwindCallback: finally clause [%d,%d] - call\n", EHClause.TryStartPC, EHClause.TryEndPC)); pExInfo->m_EHClauseInfo.SetManagedCodeEntered(TRUE); ::CallJitEHFinally(pCf, (BYTE *)uStartAddress, &EHClause, nestingLevel); pExInfo->m_EHClauseInfo.SetManagedCodeEntered(FALSE); LOG((LF_EH, LL_INFO100, "COMPlusUnwindCallback: finally - returned\n")); // Notify the profiler that we are done with the finally code if (fGiveDebuggerAndProfilerNotification) EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFinallyLeave(); pExInfo->m_EHClauseInfo.ResetInfo(); continue; } // Current is not a finally, check if it's the catching handler (or filter). 
if (pData->pFunc != pFunc || (ULONG)(pData->dHandler) != i || pData->pStack != pStack) { continue; } #ifdef _DEBUG gLastResumedExceptionFunc = pCf->GetFunction(); gLastResumedExceptionHandler = i; #endif // save clause information in the exinfo pExInfo->m_EHClauseInfo.SetInfo(COR_PRF_CLAUSE_CATCH, uStartAddress + EHClause.HandlerStartPC, StackFrame((UINT_PTR)pHandlerEBP)); // Notify the profiler that we are about to resume at the catcher. if (fGiveDebuggerAndProfilerNotification) { DACNotify::DoExceptionCatcherEnterNotification(pFunc, EHClause.HandlerStartPC); EEToProfilerExceptionInterfaceWrapper::ExceptionCatcherEnter(pThread, pFunc); EEToDebuggerExceptionInterfaceWrapper::ExceptionHandle(pFunc, pMethodAddr, EHClause.HandlerStartPC, pHandlerEBP); } STRESS_LOG4(LF_EH, LL_INFO100, "COMPlusUnwindCallback: offset 0x%x matches clause [0x%x, 0x%x) matches in method %pM\n", offs, EHClause.TryStartPC, EHClause.TryEndPC, pFunc); // ResumeAtJitEH will set pExInfo->m_EHClauseInfo.m_fManagedCodeEntered = TRUE; at the appropriate time ::ResumeAtJitEH(pCf, (BYTE *)uStartAddress, &EHClause, nestingLevel, pThread, pData->bUnwindStack); //UNREACHABLE_MSG("ResumeAtJitEH shouldn't have returned!"); // we do not set pExInfo->m_EHClauseInfo.m_fManagedCodeEntered = FALSE here, // that happens when the catch clause calls back to COMPlusEndCatch } STRESS_LOG1(LF_EH, LL_INFO100, "COMPlusUnwindCallback: no handler found in method %pM\n", pFunc); if (fGiveDebuggerAndProfilerNotification) EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFunctionLeave(pFunc); return SWA_CONTINUE; #ifdef DEBUGGING_SUPPORTED LDoDebuggerIntercept: STRESS_LOG1(LF_EH|LF_CORDB, LL_INFO100, "COMPlusUnwindCallback: Intercepting in method %pM\n", pFunc); // // Setup up the easy parts of the context to restart at. // EHContext context; // // Note: EAX ECX EDX are scratch // context.Esp = (DWORD)(size_t)(GetRegdisplaySP(regs)); context.Ebx = *regs->pEbx; context.Esi = *regs->pEsi; context.Edi = *regs->pEdi; context.Ebp = *regs->pEbp; // // Set scratch registers to 0 to avoid reporting incorrect values to GC in case of debugger changing the IP // in the middle of a scratch register lifetime (see Dev10 754922) // context.Eax = 0; context.Ecx = 0; context.Edx = 0; // // Ok, now set the target Eip to the address the debugger requested. // ULONG_PTR nativeOffset; pExInfo->m_DebuggerExState.GetDebuggerInterceptInfo(NULL, NULL, NULL, NULL, &nativeOffset, NULL); context.Eip = GetControlPC(regs) - (pCf->GetRelOffset() - nativeOffset); // // Finally we need to get the correct Esp for this nested level // context.Esp = pCf->GetCodeManager()->GetAmbientSP(regs, pCf->GetCodeInfo(), nativeOffset, pData->dHandler, pCf->GetCodeManState() ); // // In case we see unknown FS:[0] handlers we delay the interception point until we reach the handler that protects the interception point. // This way we have both FS:[0] handlers being poped up by RtlUnwind and managed capital F Frames being unwinded by managed stackwalker. // BOOL fCheckForUnknownHandler = TRUE; if (PopNestedExceptionRecords((LPVOID)(size_t)context.Esp, fCheckForUnknownHandler)) { // Let ClrDebuggerDoUnwindAndIntercept RtlUnwind continue to unwind frames until we reach the handler protected by COMPlusNestedExceptionHandler. 
pExInfo->m_InterceptionContext = context; pExInfo->m_ValidInterceptionContext = TRUE; STRESS_LOG0(LF_EH|LF_CORDB, LL_INFO100, "COMPlusUnwindCallback: Skip interception until unwinding reaches the actual handler protected by COMPlusNestedExceptionHandler\n"); } else { // // Pop off all the Exception information up to this point in the stack // UnwindExceptionTrackerAndResumeInInterceptionFrame(pExInfo, &context); } return SWA_ABORT; #endif // DEBUGGING_SUPPORTED } // StackWalkAction COMPlusUnwindCallback () #if defined(_MSC_VER) #pragma warning(pop) #endif #if defined(_MSC_VER) #pragma warning(push) #pragma warning (disable : 4740) // There is inline asm code in this function, which disables // global optimizations. #pragma warning (disable : 4731) #endif void ResumeAtJitEH(CrawlFrame* pCf, BYTE* startPC, EE_ILEXCEPTION_CLAUSE *EHClausePtr, DWORD nestingLevel, Thread *pThread, BOOL unwindStack) { // No dynamic contract here because this function doesn't return and destructors wouldn't be executed WRAPPER_NO_CONTRACT; EHContext context; context.Setup(PCODE(startPC + EHClausePtr->HandlerStartPC), pCf->GetRegisterSet()); size_t * pShadowSP = NULL; // Write Esp to *pShadowSP before jumping to handler size_t * pHandlerEnd = NULL; OBJECTREF throwable = PossiblyUnwrapThrowable(pThread->GetThrowable(), pCf->GetAssembly()); pCf->GetCodeManager()->FixContext(ICodeManager::CATCH_CONTEXT, &context, pCf->GetCodeInfo(), EHClausePtr->HandlerStartPC, nestingLevel, throwable, pCf->GetCodeManState(), &pShadowSP, &pHandlerEnd); if (pHandlerEnd) { *pHandlerEnd = EHClausePtr->HandlerEndPC; } MethodDesc* pMethodDesc = pCf->GetCodeInfo()->GetMethodDesc(); TADDR startAddress = pCf->GetCodeInfo()->GetStartAddress(); if (InlinedCallFrame::FrameHasActiveCall(pThread->m_pFrame)) { // When unwinding an exception in ReadyToRun, the JIT_PInvokeEnd helper which unlinks the ICF from // the thread will be skipped. This is because unlike jitted code, each pinvoke is wrapped by calls // to the JIT_PInvokeBegin and JIT_PInvokeEnd helpers, which push and pop the ICF on the thread. The // ICF is not linked at the method prolog and unlinked at the epilog when running R2R code. Since the // JIT_PInvokeEnd helper will be skipped, we need to unlink the ICF here. If the executing method // has another pinvoke, it will re-link the ICF again when the JIT_PInvokeBegin helper is called. // Check that the InlinedCallFrame is in the method with the exception handler. There can be other // InlinedCallFrame somewhere up the call chain that is not related to the current exception // handling. #ifdef DEBUG TADDR handlerFrameSP = pCf->GetRegisterSet()->SP; #endif // DEBUG // Find the ESP of the caller of the method with the exception handler. 
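        // After UnwindStackFrame the register display describes the caller, so comparing the
        // InlinedCallFrame address against pCf->GetRegisterSet()->SP tells us whether the ICF lives
        // within the frame of the method that contains the handler.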
bool unwindSuccess = pCf->GetCodeManager()->UnwindStackFrame(pCf->GetRegisterSet(), pCf->GetCodeInfo(), pCf->GetCodeManagerFlags(), pCf->GetCodeManState(), NULL /* StackwalkCacheUnwindInfo* */); _ASSERTE(unwindSuccess); if (((TADDR)pThread->m_pFrame < pCf->GetRegisterSet()->SP) && ExecutionManager::IsReadyToRunCode(((InlinedCallFrame*)pThread->m_pFrame)->m_pCallerReturnAddress)) { _ASSERTE((TADDR)pThread->m_pFrame >= handlerFrameSP); pThread->m_pFrame->Pop(pThread); } } // save esp so that endcatch can restore it (it always restores, so want correct value) ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo); pExInfo->m_dEsp = (LPVOID)context.GetSP(); LOG((LF_EH, LL_INFO1000, "ResumeAtJitEH: current m_dEsp set to %p\n", context.GetSP())); PVOID dEsp = GetCurrentSP(); if (!unwindStack) { // If we don't want to unwind the stack, then the guard page had better not be gone! _ASSERTE(pThread->DetermineIfGuardPagePresent()); // so down below won't really update esp context.SetSP(dEsp); pExInfo->m_pShadowSP = pShadowSP; // so that endcatch can zero it back if (pShadowSP) { *pShadowSP = (size_t)dEsp; } } else { // so shadow SP has the real SP as we are going to unwind the stack dEsp = (LPVOID)context.GetSP(); // BEGIN: pExInfo->UnwindExInfo(dEsp); ExInfo *pPrevNestedInfo = pExInfo->m_pPrevNestedInfo; while (pPrevNestedInfo && pPrevNestedInfo->m_StackAddress < dEsp) { LOG((LF_EH, LL_INFO1000, "ResumeAtJitEH: popping nested ExInfo at 0x%p\n", pPrevNestedInfo->m_StackAddress)); pPrevNestedInfo->DestroyExceptionHandle(); pPrevNestedInfo->m_StackTraceInfo.FreeStackTrace(); #ifdef DEBUGGING_SUPPORTED if (g_pDebugInterface != NULL) { g_pDebugInterface->DeleteInterceptContext(pPrevNestedInfo->m_DebuggerExState.GetDebuggerInterceptContext()); } #endif // DEBUGGING_SUPPORTED pPrevNestedInfo = pPrevNestedInfo->m_pPrevNestedInfo; } pExInfo->m_pPrevNestedInfo = pPrevNestedInfo; _ASSERTE(pExInfo->m_pPrevNestedInfo == 0 || pExInfo->m_pPrevNestedInfo->m_StackAddress >= dEsp); // Before we unwind the SEH records, get the Frame from the top-most nested exception record. Frame* pNestedFrame = GetCurrFrame(FindNestedEstablisherFrame(GetCurrentSEHRecord())); PopNestedExceptionRecords((LPVOID)(size_t)dEsp); EXCEPTION_REGISTRATION_RECORD* pNewBottomMostHandler = GetCurrentSEHRecord(); pExInfo->m_pShadowSP = pShadowSP; // The context and exception record are no longer any good. _ASSERTE(pExInfo->m_pContext < dEsp); // It must be off the top of the stack. pExInfo->m_pContext = 0; // Whack it. pExInfo->m_pExceptionRecord = 0; pExInfo->m_pExceptionPointers = 0; // We're going to put one nested record back on the stack before we resume. This is // where it goes. NestedHandlerExRecord *pNestedHandlerExRecord = (NestedHandlerExRecord*)((BYTE*)dEsp - ALIGN_UP(sizeof(NestedHandlerExRecord), STACK_ALIGN_SIZE)); // The point of no return. The next statement starts scribbling on the stack. It's // deep enough that we won't hit our own locals. (That's important, 'cuz we're still // using them.) // _ASSERTE(dEsp > &pCf); pNestedHandlerExRecord->m_handlerInfo.m_hThrowable=NULL; // This is random memory. Handle // must be initialized to null before // calling Init(), as Init() will try // to free any old handle. pNestedHandlerExRecord->Init((PEXCEPTION_ROUTINE)COMPlusNestedExceptionHandler, pNestedFrame); INSTALL_EXCEPTION_HANDLING_RECORD(&(pNestedHandlerExRecord->m_ExReg)); context.SetSP(pNestedHandlerExRecord); // We might have moved the bottommost handler. 
The nested record itself is never // the bottom most handler -- it's pushed after the fact. So we have to make the // bottom-most handler the one BEFORE the nested record. if (pExInfo->m_pBottomMostHandler < pNewBottomMostHandler) { STRESS_LOG3(LF_EH, LL_INFO10000, "ResumeAtJitEH: setting ExInfo:0x%p m_pBottomMostHandler from 0x%p to 0x%p\n", pExInfo, pExInfo->m_pBottomMostHandler, pNewBottomMostHandler); pExInfo->m_pBottomMostHandler = pNewBottomMostHandler; } if (pShadowSP) { *pShadowSP = context.GetSP(); } } STRESS_LOG3(LF_EH, LL_INFO100, "ResumeAtJitEH: resuming at EIP = %p ESP = %p EBP = %p\n", context.Eip, context.GetSP(), context.GetFP()); #ifdef STACK_GUARDS_DEBUG // We are transitioning back to managed code, so ensure that we are in // SO-tolerant mode before we do so. RestoreSOToleranceState(); #endif // we want this to happen as late as possible but certainly after the notification // that the handle for the current ExInfo has been freed has been delivered pExInfo->m_EHClauseInfo.SetManagedCodeEntered(TRUE); ETW::ExceptionLog::ExceptionCatchBegin(pMethodDesc, (PVOID)startAddress); ResumeAtJitEHHelper(&context); UNREACHABLE_MSG("Should never return from ResumeAtJitEHHelper!"); // we do not set pExInfo->m_EHClauseInfo.m_fManagedCodeEntered = FALSE here, // that happens when the catch clause calls back to COMPlusEndCatch // we don't return to this point so it would be moot (see unreachable_msg above) } #if defined(_MSC_VER) #pragma warning(pop) #endif // Must be in a separate function because INSTALL_COMPLUS_EXCEPTION_HANDLER has a filter int CallJitEHFilterWorker(size_t *pShadowSP, EHContext *pContext) { STATIC_CONTRACT_THROWS; STATIC_CONTRACT_GC_TRIGGERS; STATIC_CONTRACT_MODE_COOPERATIVE; int retVal = EXCEPTION_CONTINUE_SEARCH; BEGIN_CALL_TO_MANAGED(); retVal = CallJitEHFilterHelper(pShadowSP, pContext); END_CALL_TO_MANAGED(); return retVal; } int CallJitEHFilter(CrawlFrame* pCf, BYTE* startPC, EE_ILEXCEPTION_CLAUSE *EHClausePtr, DWORD nestingLevel, OBJECTREF thrownObj) { STATIC_CONTRACT_THROWS; STATIC_CONTRACT_GC_TRIGGERS; STATIC_CONTRACT_MODE_COOPERATIVE; int retVal = EXCEPTION_CONTINUE_SEARCH; size_t * pShadowSP = NULL; EHContext context; context.Setup(PCODE(startPC + EHClausePtr->FilterOffset), pCf->GetRegisterSet()); size_t * pEndFilter = NULL; // Write pCf->GetCodeManager()->FixContext(ICodeManager::FILTER_CONTEXT, &context, pCf->GetCodeInfo(), EHClausePtr->FilterOffset, nestingLevel, thrownObj, pCf->GetCodeManState(), &pShadowSP, &pEndFilter); // End of the filter is the same as start of handler if (pEndFilter) { *pEndFilter = EHClausePtr->HandlerStartPC; } // ExceptionFilterFrame serves two purposes: // // 1. It serves as a frame that stops the managed search for handler // if we fault in the filter. ThrowCallbackType.pTopFrame is going point // to this frame during search for exception handler inside filter. // The search for handler needs a frame to stop. If we had no frame here, // the exceptions in filters would not be swallowed correctly since we would // walk past the EX_TRY/EX_CATCH block in COMPlusThrowCallbackHelper. // // 2. It allows setting of SHADOW_SP_FILTER_DONE flag in UnwindFrames() // if we fault in the filter. We have to set this flag together with unwinding // of the filter frame. Using a regular C++ holder to clear this flag here would cause // GC holes. The stack would be in inconsistent state when we trigger gc just before // returning from UnwindFrames. 
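    // On the normal path the frame is popped explicitly right after the filter returns --
    // see the matching exceptionFilterFrame.Pop() below.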
FrameWithCookie<ExceptionFilterFrame> exceptionFilterFrame(pShadowSP); ETW::ExceptionLog::ExceptionFilterBegin(pCf->GetCodeInfo()->GetMethodDesc(), (PVOID)pCf->GetCodeInfo()->GetStartAddress()); retVal = CallJitEHFilterWorker(pShadowSP, &context); ETW::ExceptionLog::ExceptionFilterEnd(); exceptionFilterFrame.Pop(); return retVal; } void CallJitEHFinally(CrawlFrame* pCf, BYTE* startPC, EE_ILEXCEPTION_CLAUSE *EHClausePtr, DWORD nestingLevel) { WRAPPER_NO_CONTRACT; EHContext context; context.Setup(PCODE(startPC + EHClausePtr->HandlerStartPC), pCf->GetRegisterSet()); size_t * pShadowSP = NULL; // Write Esp to *pShadowSP before jumping to handler size_t * pFinallyEnd = NULL; pCf->GetCodeManager()->FixContext( ICodeManager::FINALLY_CONTEXT, &context, pCf->GetCodeInfo(), EHClausePtr->HandlerStartPC, nestingLevel, ObjectToOBJECTREF((Object *) NULL), pCf->GetCodeManState(), &pShadowSP, &pFinallyEnd); if (pFinallyEnd) { *pFinallyEnd = EHClausePtr->HandlerEndPC; } ETW::ExceptionLog::ExceptionFinallyBegin(pCf->GetCodeInfo()->GetMethodDesc(), (PVOID)pCf->GetCodeInfo()->GetStartAddress()); CallJitEHFinallyHelper(pShadowSP, &context); ETW::ExceptionLog::ExceptionFinallyEnd(); // // Update the registers using new context // // This is necessary to reflect GC pointer changes during the middle of a unwind inside a // finally clause, because: // 1. GC won't see the part of stack inside try (which has thrown an exception) that is already // unwinded and thus GC won't update GC pointers for this portion of the stack, but rather the // call stack in finally. // 2. upon return of finally, the unwind process continues and unwinds stack based on the part // of stack inside try and won't see the updated values in finally. // As a result, we need to manually update the context using register values upon return of finally // // Note that we only update the registers for finally clause because // 1. For filter handlers, stack walker is able to see the whole stack (including the try part) // with the help of ExceptionFilterFrame as filter handlers are called in first pass // 2. For catch handlers, the current unwinding is already finished // context.UpdateFrame(pCf->GetRegisterSet()); // This does not need to be guarded by a holder because the frame is dead if an exception gets thrown. Filters are different // since they are run in the first pass, so we must update the shadowSP reset in CallJitEHFilter. if (pShadowSP) { *pShadowSP = 0; // reset the shadowSP to 0 } } #if defined(_MSC_VER) #pragma warning (default : 4731) #endif //===================================================================== // ********************************************************************* BOOL ComPlusFrameSEH(EXCEPTION_REGISTRATION_RECORD* pEHR) { LIMITED_METHOD_CONTRACT; return ((LPVOID)pEHR->Handler == (LPVOID)COMPlusFrameHandler || (LPVOID)pEHR->Handler == (LPVOID)COMPlusNestedExceptionHandler); } // //------------------------------------------------------------------------- // This is installed when we call COMPlusFrameHandler to provide a bound to // determine when are within a nested exception //------------------------------------------------------------------------- EXCEPTION_HANDLER_IMPL(COMPlusNestedExceptionHandler) { WRAPPER_NO_CONTRACT; if (pExceptionRecord->ExceptionFlags & (EXCEPTION_UNWINDING | EXCEPTION_EXIT_UNWIND)) { LOG((LF_EH, LL_INFO100, " COMPlusNestedHandler(unwind) with %x at %x\n", pExceptionRecord->ExceptionCode, pContext ? 
GetIP(pContext) : 0)); // We're unwinding past a nested exception record, which means that we've thrown // a new exception out of a region in which we're handling a previous one. The // previous exception is overridden -- and needs to be unwound. // The preceding is ALMOST true. There is one more case, where we use setjmp/longjmp // from withing a nested handler. We won't have a nested exception in that case -- just // the unwind. Thread* pThread = GetThread(); ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo); ExInfo* pPrevNestedInfo = pExInfo->m_pPrevNestedInfo; if (pPrevNestedInfo == &((NestedHandlerExRecord*)pEstablisherFrame)->m_handlerInfo) { _ASSERTE(pPrevNestedInfo); LOG((LF_EH, LL_INFO100, "COMPlusNestedExceptionHandler: PopExInfo(): popping nested ExInfo at 0x%p\n", pPrevNestedInfo)); pPrevNestedInfo->DestroyExceptionHandle(); pPrevNestedInfo->m_StackTraceInfo.FreeStackTrace(); #ifdef DEBUGGING_SUPPORTED if (g_pDebugInterface != NULL) { g_pDebugInterface->DeleteInterceptContext(pPrevNestedInfo->m_DebuggerExState.GetDebuggerInterceptContext()); } #endif // DEBUGGING_SUPPORTED pExInfo->m_pPrevNestedInfo = pPrevNestedInfo->m_pPrevNestedInfo; } else { // The whacky setjmp/longjmp case. Nothing to do. } } else { LOG((LF_EH, LL_INFO100, " InCOMPlusNestedHandler with %x at %x\n", pExceptionRecord->ExceptionCode, pContext ? GetIP(pContext) : 0)); } // There is a nasty "gotcha" in the way exception unwinding, finally's, and nested exceptions // interact. Here's the scenario ... it involves two exceptions, one normal one, and one // raised in a finally. // // The first exception occurs, and is caught by some handler way up the stack. That handler // calls RtlUnwind -- and handlers that didn't catch this first exception are called again, with // the UNWIND flag set. If, one of the handlers throws an exception during // unwind (like, a throw from a finally) -- then that same handler is not called during // the unwind pass of the second exception. [ASIDE: It is called on first-pass.] // // What that means is -- the COMPlusExceptionHandler, can't count on unwinding itself correctly // if an exception is thrown from a finally. Instead, it relies on the NestedExceptionHandler // that it pushes for this. // EXCEPTION_DISPOSITION retval = EXCEPTION_HANDLER_FWD(COMPlusFrameHandler); LOG((LF_EH, LL_INFO100, "Leaving COMPlusNestedExceptionHandler with %d\n", retval)); return retval; } EXCEPTION_REGISTRATION_RECORD *FindNestedEstablisherFrame(EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame) { LIMITED_METHOD_CONTRACT; while (pEstablisherFrame->Handler != (PEXCEPTION_ROUTINE)COMPlusNestedExceptionHandler) { pEstablisherFrame = pEstablisherFrame->Next; _ASSERTE(pEstablisherFrame != EXCEPTION_CHAIN_END); // should always find one } return pEstablisherFrame; } EXCEPTION_HANDLER_IMPL(FastNExportExceptHandler) { WRAPPER_NO_CONTRACT; // Most of our logic is in commin with COMPlusFrameHandler. EXCEPTION_DISPOSITION retval = EXCEPTION_HANDLER_FWD(COMPlusFrameHandler); #ifdef _DEBUG // If the exception is escaping the last CLR personality routine on the stack, // then state a flag on the thread to indicate so. 
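    // ExceptionContinueSearch here means COMPlusFrameHandler found no managed handler in the
    // range this record guards, i.e. the exception is escaping back across the reverse
    // P/Invoke boundary that pushed this handler.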
if (retval == ExceptionContinueSearch) { SetReversePInvokeEscapingUnhandledExceptionStatus(IS_UNWINDING(pExceptionRecord->ExceptionFlags), pEstablisherFrame); } #endif // _DEBUG return retval; } #ifdef FEATURE_COMINTEROP // The reverse COM interop path needs to be sure to pop the ComMethodFrame that is pushed, but we do not want // to have an additional FS:0 handler between the COM callsite and the call into managed. So we push this // FS:0 handler, which will defer to the usual COMPlusFrameHandler and then perform the cleanup of the // ComMethodFrame, if needed. EXCEPTION_HANDLER_IMPL(COMPlusFrameHandlerRevCom) { STATIC_CONTRACT_THROWS; STATIC_CONTRACT_GC_TRIGGERS; STATIC_CONTRACT_MODE_ANY; // Defer to COMPlusFrameHandler EXCEPTION_DISPOSITION result = EXCEPTION_HANDLER_FWD(COMPlusFrameHandler); if (pExceptionRecord->ExceptionFlags & (EXCEPTION_UNWINDING | EXCEPTION_EXIT_UNWIND)) { // Do cleanup as needed ComMethodFrame::DoSecondPassHandlerCleanup(GetCurrFrame(pEstablisherFrame)); } return result; } #endif // FEATURE_COMINTEROP #endif // !DACCESS_COMPILE #endif // !FEATURE_EH_FUNCLETS PTR_CONTEXT GetCONTEXTFromRedirectedStubStackFrame(CONTEXT * pContext) { LIMITED_METHOD_DAC_CONTRACT; UINT_PTR stackSlot = pContext->Ebp + REDIRECTSTUB_EBP_OFFSET_CONTEXT; PTR_PTR_CONTEXT ppContext = dac_cast<PTR_PTR_CONTEXT>((TADDR)stackSlot); return *ppContext; } #ifndef DACCESS_COMPILE LONG CLRNoCatchHandler(EXCEPTION_POINTERS* pExceptionInfo, PVOID pv) { #ifndef FEATURE_EH_FUNCLETS WRAPPER_NO_CONTRACT; STATIC_CONTRACT_ENTRY_POINT; LONG result = EXCEPTION_CONTINUE_SEARCH; // This function can be called during the handling of a SO //BEGIN_ENTRYPOINT_VOIDRET; result = CLRVectoredExceptionHandler(pExceptionInfo); if (EXCEPTION_EXECUTE_HANDLER == result) { result = EXCEPTION_CONTINUE_SEARCH; } //END_ENTRYPOINT_VOIDRET; return result; #else // !FEATURE_EH_FUNCLETS return EXCEPTION_CONTINUE_SEARCH; #endif // !FEATURE_EH_FUNCLETS } // Returns TRUE if caller should resume execution. BOOL AdjustContextForVirtualStub( EXCEPTION_RECORD *pExceptionRecord, CONTEXT *pContext) { LIMITED_METHOD_CONTRACT; Thread * pThread = GetThreadNULLOk(); // We may not have a managed thread object. Example is an AV on the helper thread. // (perhaps during StubManager::IsStub) if (pThread == NULL) { return FALSE; } PCODE f_IP = GetIP(pContext); VirtualCallStubManager::StubKind sk; VirtualCallStubManager *pMgr = VirtualCallStubManager::FindStubManager(f_IP, &sk); if (sk == VirtualCallStubManager::SK_DISPATCH) { if (*PTR_WORD(f_IP) != X86_INSTR_CMP_IND_ECX_IMM32) { _ASSERTE(!"AV in DispatchStub at unknown instruction"); return FALSE; } } else if (sk == VirtualCallStubManager::SK_RESOLVE) { if (*PTR_WORD(f_IP) != X86_INSTR_MOV_EAX_ECX_IND) { _ASSERTE(!"AV in ResolveStub at unknown instruction"); return FALSE; } SetSP(pContext, dac_cast<PCODE>(dac_cast<PTR_BYTE>(GetSP(pContext)) + sizeof(void*))); // rollback push eax } else { return FALSE; } PCODE callsite = *dac_cast<PTR_PCODE>(GetSP(pContext)); if (pExceptionRecord != NULL) { pExceptionRecord->ExceptionAddress = (PVOID)callsite; } SetIP(pContext, callsite); #if defined(GCCOVER_TOLERATE_SPURIOUS_AV) // Modify LastAVAddress saved in thread to distinguish between fake & real AV // See comments in IsGcMarker in file excep.cpp for more details pThread->SetLastAVAddress((LPVOID)GetIP(pContext)); #endif // defined(GCCOVER_TOLERATE_SPURIOUS_AV) // put ESP back to what it was before the call. 
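    // ESP currently points at the return address pushed by the call into the stub; the code
    // below steps over it (and, outside UNIX_X86_ABI, over the stack arguments as well).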
    TADDR sp = GetSP(pContext) + sizeof(void*);

#ifndef UNIX_X86_ABI
    // set the ESP to what it would be after the call (remove pushed arguments)

    size_t stackArgumentsSize;
    if (sk == VirtualCallStubManager::SK_DISPATCH)
    {
        ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
        DispatchHolder *holder = DispatchHolder::FromDispatchEntry(f_IP);
        MethodTable *pMT = (MethodTable*)holder->stub()->expectedMT();
        DispatchToken token(VirtualCallStubManager::GetTokenFromStubQuick(pMgr, f_IP, sk));
        MethodDesc* pMD = VirtualCallStubManager::GetRepresentativeMethodDescFromToken(token, pMT);
        stackArgumentsSize = pMD->SizeOfArgStack();
    }
    else
    {
        // Compute the stub entry address from the address of failure (location of dereferencing of "this" pointer)
        ResolveHolder *holder = ResolveHolder::FromResolveEntry(f_IP - ResolveStub::offsetOfThisDeref());
        stackArgumentsSize = holder->stub()->stackArgumentsSize();
    }

    sp += stackArgumentsSize;
#endif // UNIX_X86_ABI

    SetSP(pContext, dac_cast<PCODE>(dac_cast<PTR_BYTE>(sp)));

    return TRUE;
}

#endif // !DACCESS_COMPILE
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // /* EXCEP.CPP: * */ #include "common.h" #include "frames.h" #include "excep.h" #include "object.h" #include "field.h" #include "dbginterface.h" #include "cgensys.h" #include "comutilnative.h" #include "sigformat.h" #include "siginfo.hpp" #include "gcheaputilities.h" #include "eedbginterfaceimpl.h" //so we can clearexception in COMPlusThrow #include "eventtrace.h" #include "eetoprofinterfacewrapper.inl" #include "eedbginterfaceimpl.inl" #include "dllimportcallback.h" #include "threads.h" #include "eeconfig.h" #include "vars.hpp" #include "generics.h" #include "asmconstants.h" #include "virtualcallstub.h" #ifndef FEATURE_EH_FUNCLETS MethodDesc * GetUserMethodForILStub(Thread * pThread, UINT_PTR uStubSP, MethodDesc * pILStubMD, Frame ** ppFrameOut); #if !defined(DACCESS_COMPILE) #define FORMAT_MESSAGE_BUFFER_LENGTH 1024 BOOL ComPlusFrameSEH(EXCEPTION_REGISTRATION_RECORD*); PEXCEPTION_REGISTRATION_RECORD GetPrevSEHRecord(EXCEPTION_REGISTRATION_RECORD*); extern "C" { // in asmhelpers.asm: VOID STDCALL ResumeAtJitEHHelper(EHContext *pContext); int STDCALL CallJitEHFilterHelper(size_t *pShadowSP, EHContext *pContext); VOID STDCALL CallJitEHFinallyHelper(size_t *pShadowSP, EHContext *pContext); typedef void (*RtlUnwindCallbackType)(void); BOOL CallRtlUnwind(EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame, RtlUnwindCallbackType callback, EXCEPTION_RECORD *pExceptionRecord, void *retval); BOOL CallRtlUnwindSafe(EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame, RtlUnwindCallbackType callback, EXCEPTION_RECORD *pExceptionRecord, void *retval); } static inline BOOL CPFH_ShouldUnwindStack(const EXCEPTION_RECORD * pCER) { LIMITED_METHOD_CONTRACT; _ASSERTE(pCER != NULL); // We can only unwind those exceptions whose context/record we don't need for a // rethrow. This is complus, and stack overflow. For all the others, we // need to keep the context around for a rethrow, which means they can't // be unwound. if (IsComPlusException(pCER) || pCER->ExceptionCode == STATUS_STACK_OVERFLOW) return TRUE; else return FALSE; } static inline BOOL IsComPlusNestedExceptionRecord(EXCEPTION_REGISTRATION_RECORD* pEHR) { LIMITED_METHOD_CONTRACT; if (pEHR->Handler == (PEXCEPTION_ROUTINE)COMPlusNestedExceptionHandler) return TRUE; return FALSE; } EXCEPTION_REGISTRATION_RECORD *TryFindNestedEstablisherFrame(EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame) { LIMITED_METHOD_CONTRACT; while (pEstablisherFrame->Handler != (PEXCEPTION_ROUTINE)COMPlusNestedExceptionHandler) { pEstablisherFrame = pEstablisherFrame->Next; if (pEstablisherFrame == EXCEPTION_CHAIN_END) return 0; } return pEstablisherFrame; } #ifdef _DEBUG // stores last handler we went to in case we didn't get an endcatch and stack is // corrupted we can figure out who did it. 
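// ("endcatch" here refers to the call back into COMPlusEndCatch that a catch clause makes when
// it finishes -- see the comments at the end of ResumeAtJitEH.)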
static MethodDesc *gLastResumedExceptionFunc = NULL; static DWORD gLastResumedExceptionHandler = 0; #endif //--------------------------------------------------------------------- // void RtlUnwindCallback() // call back function after global unwind, rtlunwind calls this function //--------------------------------------------------------------------- static void RtlUnwindCallback() { LIMITED_METHOD_CONTRACT; _ASSERTE(!"Should never get here"); } BOOL FastNExportSEH(EXCEPTION_REGISTRATION_RECORD* pEHR) { LIMITED_METHOD_CONTRACT; if ((LPVOID)pEHR->Handler == (LPVOID)FastNExportExceptHandler) return TRUE; return FALSE; } BOOL ReverseCOMSEH(EXCEPTION_REGISTRATION_RECORD* pEHR) { LIMITED_METHOD_CONTRACT; #ifdef FEATURE_COMINTEROP if ((LPVOID)pEHR->Handler == (LPVOID)COMPlusFrameHandlerRevCom) return TRUE; #endif // FEATURE_COMINTEROP return FALSE; } // // Returns true if the given SEH handler is one of our SEH handlers that is responsible for managing exceptions in // regions of managed code. // BOOL IsUnmanagedToManagedSEHHandler(EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame) { WRAPPER_NO_CONTRACT; // // ComPlusFrameSEH() is for COMPlusFrameHandler & COMPlusNestedExceptionHandler. // FastNExportSEH() is for FastNExportExceptHandler. // return (ComPlusFrameSEH(pEstablisherFrame) || FastNExportSEH(pEstablisherFrame) || ReverseCOMSEH(pEstablisherFrame)); } Frame *GetCurrFrame(EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame) { Frame *pFrame; WRAPPER_NO_CONTRACT; _ASSERTE(IsUnmanagedToManagedSEHHandler(pEstablisherFrame)); pFrame = ((FrameHandlerExRecord *)pEstablisherFrame)->GetCurrFrame(); // Assert that the exception frame is on the thread or that the exception frame is the top frame. _ASSERTE(GetThreadNULLOk() == NULL || GetThread()->GetFrame() == (Frame*)-1 || GetThread()->GetFrame() <= pFrame); return pFrame; } EXCEPTION_REGISTRATION_RECORD* GetNextCOMPlusSEHRecord(EXCEPTION_REGISTRATION_RECORD* pRec) { WRAPPER_NO_CONTRACT; if (pRec == EXCEPTION_CHAIN_END) return EXCEPTION_CHAIN_END; do { _ASSERTE(pRec != 0); pRec = pRec->Next; } while (pRec != EXCEPTION_CHAIN_END && !IsUnmanagedToManagedSEHHandler(pRec)); _ASSERTE(pRec == EXCEPTION_CHAIN_END || IsUnmanagedToManagedSEHHandler(pRec)); return pRec; } /* * GetClrSEHRecordServicingStackPointer * * This function searchs all the Frame SEH records, and finds the one that is * currently signed up to do all exception handling for the given stack pointer * on the given thread. * * Parameters: * pThread - The thread to search on. * pStackPointer - The stack location that we are finding the Frame SEH Record for. * * Returns * A pointer to the SEH record, or EXCEPTION_CHAIN_END if none was found. * */ PEXCEPTION_REGISTRATION_RECORD GetClrSEHRecordServicingStackPointer(Thread *pThread, void *pStackPointer) { ThreadExceptionState* pExState = pThread->GetExceptionState(); // // We can only do this if there is a context in the pExInfo. There are cases (most notably the // EEPolicy::HandleFatalError case) where we don't have that. In these cases we will return // no enclosing handler since we cannot accurately determine the FS:0 entry which services // this stack address. // // The side effect of this is that for these cases, the debugger cannot intercept // the exception // CONTEXT* pContextRecord = pExState->GetContextRecord(); if (pContextRecord == NULL) { return EXCEPTION_CHAIN_END; } void *exceptionSP = dac_cast<PTR_VOID>(GetSP(pContextRecord)); // // Now set the establishing frame. 
What this means in English is that we need to find // the fs:0 entry that handles exceptions for the place on the stack given in stackPointer. // PEXCEPTION_REGISTRATION_RECORD pSEHRecord = GetFirstCOMPlusSEHRecord(pThread); while (pSEHRecord != EXCEPTION_CHAIN_END) { // // Skip any SEHRecord which is not a CLR record or was pushed after the exception // on this thread occurred. // if (IsUnmanagedToManagedSEHHandler(pSEHRecord) && (exceptionSP <= (void *)pSEHRecord)) { Frame *pFrame = GetCurrFrame(pSEHRecord); // // Arcane knowledge here. All Frame records are stored on the stack by the runtime // in ever decreasing address space. So, we merely have to search back until // we find the first frame record with a higher stack value to find the // establishing frame for the given stack address. // if (((void *)pFrame) >= pStackPointer) { break; } } pSEHRecord = GetNextCOMPlusSEHRecord(pSEHRecord); } return pSEHRecord; } #ifdef _DEBUG // We've deteremined during a stack walk that managed code is transitioning to unamanaged (EE) code. Check that the // state of the EH chain is correct. // // For x86, check that we do INSTALL_COMPLUS_EXCEPTION_HANDLER before calling managed code. This check should be // done for all managed code sites, not just transistions. But this will catch most problem cases. void VerifyValidTransitionFromManagedCode(Thread *pThread, CrawlFrame *pCF) { WRAPPER_NO_CONTRACT; _ASSERTE(ExecutionManager::IsManagedCode(GetControlPC(pCF->GetRegisterSet()))); // Cannot get to the TEB of other threads. So ignore them. if (pThread != GetThreadNULLOk()) { return; } // Find the EH record guarding the current region of managed code, based on the CrawlFrame passed in. PEXCEPTION_REGISTRATION_RECORD pEHR = GetCurrentSEHRecord(); while ((pEHR != EXCEPTION_CHAIN_END) && ((ULONG_PTR)pEHR < GetRegdisplaySP(pCF->GetRegisterSet()))) { pEHR = pEHR->Next; } // VerifyValidTransitionFromManagedCode can be called before the CrawlFrame's MethodDesc is initialized. // Fix that if necessary for the consistency check. MethodDesc * pFunction = pCF->GetFunction(); if ((!IsUnmanagedToManagedSEHHandler(pEHR)) && // Will the assert fire? If not, don't waste our time. (pFunction == NULL)) { _ASSERTE(pCF->GetRegisterSet()); PCODE ip = GetControlPC(pCF->GetRegisterSet()); pFunction = ExecutionManager::GetCodeMethodDesc(ip); _ASSERTE(pFunction); } // Great, we've got the EH record that's next up the stack from the current SP (which is in managed code). That // had better be a record for one of our handlers responsible for handling exceptions in managed code. If its // not, then someone made it into managed code without setting up one of our EH handlers, and that's really // bad. CONSISTENCY_CHECK_MSGF(IsUnmanagedToManagedSEHHandler(pEHR), ("Invalid transition into managed code!\n\n" "We're walking this thread's stack and we've reached a managed frame at Esp=0x%p. " "(The method is %s::%s) " "The very next FS:0 record (0x%p) up from this point on the stack should be one of " "our 'unmanaged to managed SEH handlers', but its not... its something else, and " "that's very bad. It indicates that someone managed to call into managed code without " "setting up the proper exception handling.\n\n" "Get a good unmanaged stack trace for this thread. All FS:0 records are on the stack, " "so you can see who installed the last handler. 
Somewhere between that function and " "where the thread is now is where the bad transition occurred.\n\n" "A little extra info: FS:0 = 0x%p, pEHR->Handler = 0x%p\n", GetRegdisplaySP(pCF->GetRegisterSet()), pFunction ->m_pszDebugClassName, pFunction ->m_pszDebugMethodName, pEHR, GetCurrentSEHRecord(), pEHR->Handler)); } #endif //================================================================================ // There are some things that should never be true when handling an // exception. This function checks for them. Will assert or trap // if it finds an error. static inline void CPFH_VerifyThreadIsInValidState(Thread* pThread, DWORD exceptionCode, EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame) { WRAPPER_NO_CONTRACT; if ( exceptionCode == STATUS_BREAKPOINT || exceptionCode == STATUS_SINGLE_STEP) { return; } #ifdef _DEBUG // check for overwriting of stack CheckStackBarrier(pEstablisherFrame); // trigger check for bad fs:0 chain GetCurrentSEHRecord(); #endif if (!g_fEEShutDown) { // An exception on the GC thread, or while holding the thread store lock, will likely lock out the entire process. if (::IsGCThread() || ThreadStore::HoldingThreadStore()) { _ASSERTE(!"Exception during garbage collection or while holding thread store"); EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE); } } } #ifdef FEATURE_HIJACK void CPFH_AdjustContextForThreadSuspensionRace(CONTEXT *pContext, Thread *pThread) { WRAPPER_NO_CONTRACT; PCODE f_IP = GetIP(pContext); if (Thread::IsAddrOfRedirectFunc((PVOID)f_IP)) { // This is a very rare case where we tried to redirect a thread that was // just about to dispatch an exception, and our update of EIP took, but // the thread continued dispatching the exception. // // If this should happen (very rare) then we fix it up here. // _ASSERTE(pThread->GetSavedRedirectContext()); SetIP(pContext, GetIP(pThread->GetSavedRedirectContext())); STRESS_LOG1(LF_EH, LL_INFO100, "CPFH_AdjustContextForThreadSuspensionRace: Case 1 setting IP = %x\n", pContext->Eip); } if (f_IP == GetEEFuncEntryPoint(THROW_CONTROL_FOR_THREAD_FUNCTION)) { // This is a very rare case where we tried to redirect a thread that was // just about to dispatch an exception, and our update of EIP took, but // the thread continued dispatching the exception. // // If this should happen (very rare) then we fix it up here. // SetIP(pContext, GetIP(pThread->m_OSContext)); STRESS_LOG1(LF_EH, LL_INFO100, "CPFH_AdjustContextForThreadSuspensionRace: Case 2 setting IP = %x\n", pContext->Eip); } // We have another even rarer race condition: // - A) On thread A, Debugger puts an int 3 in the code stream at address X // - A) We hit it and the begin an exception. The eip will be X + 1 (int3 is special) // - B) Meanwhile, thread B redirects A's eip to Y. (Although A is really somewhere // in the kernel, it looks like it's still in user code, so it can fall under the // HandledJitCase and can be redirected) // - A) The OS, trying to be nice, expects we have a breakpoint exception at X+1, // but does -1 on the address since it knows int3 will leave the eip +1. // So the context structure it will pass to the Handler is ideally (X+1)-1 = X // // ** Here's the race: Since thread B redirected A, the eip is actually Y (not X+1), // but the kernel still touches it up to Y-1. So there's a window between when we hit a // bp and when the handler gets called that this can happen. 
// This causes an unhandled BP (since the debugger doesn't recognize the bp at Y-1) // // So what to do: If we land at Y-1 (ie, if f_IP+1 is the addr of a Redirected Func), // then restore the EIP back to X. This will skip the redirection. // Fortunately, this only occurs in cases where it's ok // to skip. The debugger will recognize the patch and handle it. if (Thread::IsAddrOfRedirectFunc((PVOID)(f_IP + 1))) { _ASSERTE(pThread->GetSavedRedirectContext()); SetIP(pContext, GetIP(pThread->GetSavedRedirectContext()) - 1); STRESS_LOG1(LF_EH, LL_INFO100, "CPFH_AdjustContextForThreadSuspensionRace: Case 3 setting IP = %x\n", pContext->Eip); } if (f_IP + 1 == GetEEFuncEntryPoint(THROW_CONTROL_FOR_THREAD_FUNCTION)) { SetIP(pContext, GetIP(pThread->m_OSContext) - 1); STRESS_LOG1(LF_EH, LL_INFO100, "CPFH_AdjustContextForThreadSuspensionRace: Case 4 setting IP = %x\n", pContext->Eip); } } #endif // FEATURE_HIJACK uint32_t g_exceptionCount; //****************************************************************************** EXCEPTION_DISPOSITION COMPlusAfterUnwind( EXCEPTION_RECORD *pExceptionRecord, EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame, ThrowCallbackType& tct) { WRAPPER_NO_CONTRACT; // Note: we've completed the unwind pass up to the establisher frame, and we're headed off to finish our // cleanup and end up back in jitted code. Any more FS0 handlers pushed from this point on out will _not_ be // unwound. We go ahead and assert right here that indeed there are no handlers below the establisher frame // before we go any further. _ASSERTE(pEstablisherFrame == GetCurrentSEHRecord()); Thread* pThread = GetThread(); _ASSERTE(tct.pCurrentExceptionRecord == pEstablisherFrame); NestedHandlerExRecord nestedHandlerExRecord; nestedHandlerExRecord.Init((PEXCEPTION_ROUTINE)COMPlusNestedExceptionHandler, GetCurrFrame(pEstablisherFrame)); // ... and now, put the nested record back on. INSTALL_EXCEPTION_HANDLING_RECORD(&(nestedHandlerExRecord.m_ExReg)); // We entered COMPlusAfterUnwind in PREEMP, but we need to be in COOP from here on out GCX_COOP_NO_DTOR(); tct.bIsUnwind = TRUE; tct.pProfilerNotify = NULL; LOG((LF_EH, LL_INFO100, "COMPlusFrameHandler: unwinding\n")); tct.bUnwindStack = CPFH_ShouldUnwindStack(pExceptionRecord); LOG((LF_EH, LL_INFO1000, "COMPlusAfterUnwind: going to: pFunc:%#X, pStack:%#X\n", tct.pFunc, tct.pStack)); UnwindFrames(pThread, &tct); #ifdef DEBUGGING_SUPPORTED ExInfo* pExInfo = pThread->GetExceptionState()->GetCurrentExceptionTracker(); if (pExInfo->m_ValidInterceptionContext) { // By now we should have all unknown FS:[0] handlers unwinded along with the managed Frames until // the interception point. We can now pop nested exception handlers and resume at interception context. EHContext context = pExInfo->m_InterceptionContext; pExInfo->m_InterceptionContext.Init(); pExInfo->m_ValidInterceptionContext = FALSE; UnwindExceptionTrackerAndResumeInInterceptionFrame(pExInfo, &context); } #endif // DEBUGGING_SUPPORTED _ASSERTE(!"Should not get here"); return ExceptionContinueSearch; } // EXCEPTION_DISPOSITION COMPlusAfterUnwind() #ifdef DEBUGGING_SUPPORTED //--------------------------------------------------------------------------------------- // // This function is called to intercept an exception and start an unwind. 
// // Arguments: // pCurrentEstablisherFrame - the exception registration record covering the stack range // containing the interception point // pExceptionRecord - EXCEPTION_RECORD of the exception being intercepted // // Return Value: // ExceptionContinueSearch if the exception cannot be intercepted // // Notes: // If the exception is intercepted, this function never returns. // EXCEPTION_DISPOSITION ClrDebuggerDoUnwindAndIntercept(EXCEPTION_REGISTRATION_RECORD *pCurrentEstablisherFrame, EXCEPTION_RECORD *pExceptionRecord) { WRAPPER_NO_CONTRACT; if (!CheckThreadExceptionStateForInterception()) { return ExceptionContinueSearch; } Thread* pThread = GetThread(); ThreadExceptionState* pExState = pThread->GetExceptionState(); EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame; ThrowCallbackType tct; tct.Init(); pExState->GetDebuggerState()->GetDebuggerInterceptInfo(&pEstablisherFrame, &(tct.pFunc), &(tct.dHandler), &(tct.pStack), NULL, &(tct.pBottomFrame) ); // // If the handler that we've selected as the handler for the target frame of the unwind is in fact above the // handler that we're currently executing in, then use the current handler instead. Why? Our handlers for // nested exceptions actually process managed frames that live above them, up to the COMPlusFrameHanlder that // pushed the nested handler. If the user selectes a frame above the nested handler, then we will have selected // the COMPlusFrameHandler above the current nested handler. But we don't want to ask RtlUnwind to unwind past // the nested handler that we're currently executing in. // if (pEstablisherFrame > pCurrentEstablisherFrame) { // This should only happen if we're in a COMPlusNestedExceptionHandler. _ASSERTE(IsComPlusNestedExceptionRecord(pCurrentEstablisherFrame)); pEstablisherFrame = pCurrentEstablisherFrame; } #ifdef _DEBUG tct.pCurrentExceptionRecord = pEstablisherFrame; #endif LOG((LF_EH|LF_CORDB, LL_INFO100, "ClrDebuggerDoUnwindAndIntercept: Intercepting at %s\n", tct.pFunc->m_pszDebugMethodName)); LOG((LF_EH|LF_CORDB, LL_INFO100, "\t\t: pFunc is 0x%X\n", tct.pFunc)); LOG((LF_EH|LF_CORDB, LL_INFO100, "\t\t: pStack is 0x%X\n", tct.pStack)); CallRtlUnwindSafe(pEstablisherFrame, RtlUnwindCallback, pExceptionRecord, 0); ExInfo* pExInfo = pThread->GetExceptionState()->GetCurrentExceptionTracker(); if (pExInfo->m_ValidInterceptionContext) { // By now we should have all unknown FS:[0] handlers unwinded along with the managed Frames until // the interception point. We can now pop nested exception handlers and resume at interception context. GCX_COOP(); EHContext context = pExInfo->m_InterceptionContext; pExInfo->m_InterceptionContext.Init(); pExInfo->m_ValidInterceptionContext = FALSE; UnwindExceptionTrackerAndResumeInInterceptionFrame(pExInfo, &context); } // on x86 at least, RtlUnwind always returns // Note: we've completed the unwind pass up to the establisher frame, and we're headed off to finish our // cleanup and end up back in jitted code. Any more FS0 handlers pushed from this point on out will _not_ be // unwound. return COMPlusAfterUnwind(pExState->GetExceptionRecord(), pEstablisherFrame, tct); } // EXCEPTION_DISPOSITION ClrDebuggerDoUnwindAndIntercept() #endif // DEBUGGING_SUPPORTED // This is a wrapper around the assembly routine that invokes RtlUnwind in the OS. // When we invoke RtlUnwind, the OS will modify the ExceptionFlags field in the // exception record to reflect unwind. 
Since we call RtlUnwind in the first pass // with a valid exception record when we find an exception handler AND because RtlUnwind // returns on x86, the OS would have flagged the exception record for unwind. // // Incase the exception is rethrown from the catch/filter-handler AND it's a non-COMPLUS // exception, the runtime will use the reference to the saved exception record to reraise // the exception, as part of rethrow fixup. Since the OS would have modified the exception record // to reflect unwind, this wrapper will "reset" the ExceptionFlags field when RtlUnwind returns. // Otherwise, the rethrow will result in second pass, as opposed to first, since the ExceptionFlags // would indicate an unwind. // // This rethrow issue does not affect COMPLUS exceptions since we always create a brand new exception // record for them in RaiseTheExceptionInternalOnly. BOOL CallRtlUnwindSafe(EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame, RtlUnwindCallbackType callback, EXCEPTION_RECORD *pExceptionRecord, void *retval) { LIMITED_METHOD_CONTRACT; // Save the ExceptionFlags value before invoking RtlUnwind. DWORD dwExceptionFlags = pExceptionRecord->ExceptionFlags; BOOL fRetVal = CallRtlUnwind(pEstablisherFrame, callback, pExceptionRecord, retval); // Reset ExceptionFlags field, if applicable if (pExceptionRecord->ExceptionFlags != dwExceptionFlags) { // We would expect the 32bit OS to have set the unwind flag at this point. _ASSERTE(pExceptionRecord->ExceptionFlags & EXCEPTION_UNWINDING); LOG((LF_EH, LL_INFO100, "CallRtlUnwindSafe: Resetting ExceptionFlags from %lu to %lu\n", pExceptionRecord->ExceptionFlags, dwExceptionFlags)); pExceptionRecord->ExceptionFlags = dwExceptionFlags; } return fRetVal; } //****************************************************************************** // The essence of the first pass handler (after we've decided to actually do // the first pass handling). //****************************************************************************** inline EXCEPTION_DISPOSITION __cdecl CPFH_RealFirstPassHandler( // ExceptionContinueSearch, etc. EXCEPTION_RECORD *pExceptionRecord, // The exception record, with exception type. EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame, // Exception frame on whose behalf this is called. CONTEXT *pContext, // Context from the exception. void *pDispatcherContext, // @todo BOOL bAsynchronousThreadStop, // @todo BOOL fPGCDisabledOnEntry) // @todo { // We don't want to use a runtime contract here since this codepath is used during // the processing of a hard SO. Contracts use a significant amount of stack // which we can't afford for those cases. STATIC_CONTRACT_THROWS; STATIC_CONTRACT_GC_TRIGGERS; STATIC_CONTRACT_MODE_COOPERATIVE; #ifdef _DEBUG static int breakOnFirstPass = -1; if (breakOnFirstPass == -1) breakOnFirstPass = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_BreakOnFirstPass); if (breakOnFirstPass != 0) { _ASSERTE(!"First pass exception handler"); } #endif EXCEPTION_DISPOSITION retval; DWORD exceptionCode = pExceptionRecord->ExceptionCode; Thread *pThread = GetThread(); #ifdef _DEBUG static int breakOnSO = -1; if (breakOnSO == -1) breakOnSO = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_BreakOnSO); if (breakOnSO != 0 && exceptionCode == STATUS_STACK_OVERFLOW) { DebugBreak(); // ASSERTing will overwrite the guard region } #endif // We always want to be in co-operative mode when we run this function and whenever we return // from it, want to go to pre-emptive mode because are returning to OS. 
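    // The assert below verifies the cooperative-mode half of that invariant (preemptive GC
    // disabled) on entry.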
_ASSERTE(pThread->PreemptiveGCDisabled()); BOOL bPopNestedHandlerExRecord = FALSE; LFH found = LFH_NOT_FOUND; // Result of calling LookForHandler. BOOL bRethrownException = FALSE; BOOL bNestedException = FALSE; #if defined(USE_FEF) BOOL bPopFaultingExceptionFrame = FALSE; FrameWithCookie<FaultingExceptionFrame> faultingExceptionFrame; #endif // USE_FEF ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo); ThrowCallbackType tct; tct.Init(); tct.pTopFrame = GetCurrFrame(pEstablisherFrame); // highest frame to search to #ifdef _DEBUG tct.pCurrentExceptionRecord = pEstablisherFrame; tct.pPrevExceptionRecord = GetPrevSEHRecord(pEstablisherFrame); #endif // _DEBUG BOOL fIsManagedCode = pContext ? ExecutionManager::IsManagedCode(GetIP(pContext)) : FALSE; // this establishes a marker so can determine if are processing a nested exception // don't want to use the current frame to limit search as it could have been unwound by // the time get to nested handler (ie if find an exception, unwind to the call point and // then resume in the catch and then get another exception) so make the nested handler // have the same boundary as this one. If nested handler can't find a handler, we won't // end up searching this frame list twice because the nested handler will set the search // boundary in the thread and so if get back to this handler it will have a range that starts // and ends at the same place. NestedHandlerExRecord nestedHandlerExRecord; nestedHandlerExRecord.Init((PEXCEPTION_ROUTINE)COMPlusNestedExceptionHandler, GetCurrFrame(pEstablisherFrame)); INSTALL_EXCEPTION_HANDLING_RECORD(&(nestedHandlerExRecord.m_ExReg)); bPopNestedHandlerExRecord = TRUE; #if defined(USE_FEF) // Note: don't attempt to push a FEF for an exception in managed code if we weren't in cooperative mode when // the exception was received. If preemptive GC was enabled when we received the exception, then it means the // exception was rethrown from unmangaed code (including EE impl), and we shouldn't push a FEF. if (fIsManagedCode && fPGCDisabledOnEntry && (pThread->m_pFrame == FRAME_TOP || pThread->m_pFrame->GetVTablePtr() != FaultingExceptionFrame::GetMethodFrameVPtr() || (size_t)pThread->m_pFrame > (size_t)pEstablisherFrame)) { // setup interrupted frame so that GC during calls to init won't collect the frames // only need it for non COM+ exceptions in managed code when haven't already // got one on the stack (will have one already if we have called rtlunwind because // the instantiation that called unwind would have installed one) faultingExceptionFrame.InitAndLink(pContext); bPopFaultingExceptionFrame = TRUE; } #endif // USE_FEF OBJECTREF e; e = pThread->LastThrownObject(); STRESS_LOG7(LF_EH, LL_INFO10, "CPFH_RealFirstPassHandler: code:%X, LastThrownObject:%p, MT:%pT" ", IP:%p, SP:%p, pContext:%p, pEstablisherFrame:%p\n", exceptionCode, OBJECTREFToObject(e), (e!=0)?e->GetMethodTable():0, pContext ? GetIP(pContext) : 0, pContext ? GetSP(pContext) : 0, pContext, pEstablisherFrame); #ifdef LOGGING // If it is a complus exception, and there is a thrown object, get its name, for better logging. if (IsComPlusException(pExceptionRecord)) { const char * eClsName = "!EXCEPTION_COMPLUS"; if (e != 0) { eClsName = e->GetMethodTable()->GetDebugClassName(); } LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: exception: 0x%08X, class: '%s', IP: 0x%p\n", exceptionCode, eClsName, pContext ? 
GetIP(pContext) : NULL)); } #endif EXCEPTION_POINTERS exceptionPointers = {pExceptionRecord, pContext}; STRESS_LOG4(LF_EH, LL_INFO10000, "CPFH_RealFirstPassHandler: setting boundaries: Exinfo: 0x%p, BottomMostHandler:0x%p, SearchBoundary:0x%p, TopFrame:0x%p\n", pExInfo, pExInfo->m_pBottomMostHandler, pExInfo->m_pSearchBoundary, tct.pTopFrame); // Here we are trying to decide if we are coming in as: // 1) first handler in a brand new exception // 2) a subsequent handler in an exception // 3) a nested exception // m_pBottomMostHandler is the registration structure (establisher frame) for the most recent (ie lowest in // memory) non-nested handler that was installed and pEstablisher frame is what the current handler // was registered with. // The OS calls each registered handler in the chain, passing its establisher frame to it. if (pExInfo->m_pBottomMostHandler != NULL && pEstablisherFrame > pExInfo->m_pBottomMostHandler) { STRESS_LOG3(LF_EH, LL_INFO10000, "CPFH_RealFirstPassHandler: detected subsequent handler. ExInfo:0x%p, BottomMost:0x%p SearchBoundary:0x%p\n", pExInfo, pExInfo->m_pBottomMostHandler, pExInfo->m_pSearchBoundary); // If the establisher frame of this handler is greater than the bottommost then it must have been // installed earlier and therefore we are case 2 if (pThread->GetThrowable() == NULL) { // Bottommost didn't setup a throwable, so not exception not for us retval = ExceptionContinueSearch; goto exit; } // setup search start point tct.pBottomFrame = pExInfo->m_pSearchBoundary; if (tct.pTopFrame == tct.pBottomFrame) { // this will happen if our nested handler already searched for us so we don't want // to search again retval = ExceptionContinueSearch; goto exit; } } else { // we are either case 1 or case 3 #if defined(_DEBUG_IMPL) //@todo: merge frames, context, handlers if (pThread->GetFrame() != FRAME_TOP) pThread->GetFrame()->LogFrameChain(LF_EH, LL_INFO1000); #endif // _DEBUG_IMPL // If the exception was rethrown, we'll create a new ExInfo, which will represent the rethrown exception. // The original exception is not the rethrown one. if (pExInfo->m_ExceptionFlags.IsRethrown() && pThread->LastThrownObject() != NULL) { pExInfo->m_ExceptionFlags.ResetIsRethrown(); bRethrownException = TRUE; #if defined(USE_FEF) if (bPopFaultingExceptionFrame) { // if we added a FEF, it will refer to the frame at the point of the original exception which is // already unwound so don't want it. // If we rethrew the exception we have already added a helper frame for the rethrow, so don't // need this one. If we didn't rethrow it, (ie rethrow from native) then there the topmost frame will // be a transition to native frame in which case we don't need it either faultingExceptionFrame.Pop(); bPopFaultingExceptionFrame = FALSE; } #endif } // If the establisher frame is less than the bottommost handler, then this is nested because the // establisher frame was installed after the bottommost. if (pEstablisherFrame < pExInfo->m_pBottomMostHandler /* || IsComPlusNestedExceptionRecord(pEstablisherFrame) */ ) { bNestedException = TRUE; // case 3: this is a nested exception. Need to save and restore the thread info STRESS_LOG3(LF_EH, LL_INFO10000, "CPFH_RealFirstPassHandler: ExInfo:0x%p detected nested exception 0x%p < 0x%p\n", pExInfo, pEstablisherFrame, pExInfo->m_pBottomMostHandler); EXCEPTION_REGISTRATION_RECORD* pNestedER = TryFindNestedEstablisherFrame(pEstablisherFrame); ExInfo *pNestedExInfo; if (!pNestedER || pNestedER >= pExInfo->m_pBottomMostHandler ) { // RARE CASE. 
We've re-entered the EE from an unmanaged filter. // // OR // // We can be here if we dont find a nested exception handler. This is exemplified using // call chain of scenario 2 explained further below. // // Assuming __try of NativeB throws an exception E1 and it gets caught in ManagedA2, then // bottom-most handler (BMH) is going to be CPFH_A. The catch will trigger an unwind // and invoke __finally in NativeB. Let the __finally throw a new exception E2. // // Assuming ManagedB2 has a catch block to catch E2, when we enter CPFH_B looking for a // handler for E2, our establisher frame will be that of CPFH_B, which will be lower // in stack than current BMH (which is CPFH_A). Thus, we will come here, determining // E2 to be nested exception correctly but not find a nested exception handler. void *limit = (void *) GetPrevSEHRecord(pExInfo->m_pBottomMostHandler); pNestedExInfo = new (nothrow) ExInfo(); // Very rare failure here; need robust allocator. if (pNestedExInfo == NULL) { // if we can't allocate memory, we can't correctly continue. #if defined(_DEBUG) if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_NestedEhOom)) _ASSERTE(!"OOM in callback from unmanaged filter."); #endif // _DEBUG EEPOLICY_HANDLE_FATAL_ERROR(COR_E_OUTOFMEMORY); } pNestedExInfo->m_StackAddress = limit; // Note: this is also the flag that tells us this // ExInfo was stack allocated. } else { pNestedExInfo = &((NestedHandlerExRecord*)pNestedER)->m_handlerInfo; } LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: PushExInfo() current: 0x%p previous: 0x%p\n", pExInfo->m_StackAddress, pNestedExInfo->m_StackAddress)); _ASSERTE(pNestedExInfo); pNestedExInfo->m_hThrowable = NULL; // pNestedExInfo may be stack allocated, and as such full of // garbage. m_hThrowable must be sane, so set it to NULL. (We could // zero the entire record, but this is cheaper.) pNestedExInfo->CopyAndClearSource(pExInfo); pExInfo->m_pPrevNestedInfo = pNestedExInfo; // Save at head of nested info chain #if 0 /* the following code was introduced in Whidbey as part of the Faulting Exception Frame removal (12/03). However it isn't correct. If any nested exceptions occur while processing a rethrow, we would incorrectly consider the nested exception to be a rethrow. See VSWhidbey 349379 for an example. Therefore I am disabling this code until we see a failure that explains why it was added in the first place. cwb 9/04. */ // If we're here as a result of a rethrown exception, set the rethrown flag on the new ExInfo. if (bRethrownException) { pExInfo->m_ExceptionFlags.SetIsRethrown(); } #endif } else { // At this point, either: // // 1) the bottom-most handler is NULL, implying this is a new exception for which we are getting ready, OR // 2) the bottom-most handler is not-NULL, implying that a there is already an existing exception in progress. // // Scenario 1 is that of a new throw and is easy to understand. Scenario 2 is the interesting one. // // ManagedA1 -> ManagedA2 -> ManagedA3 -> NativeCodeA -> ManagedB1 -> ManagedB2 -> ManagedB3 -> NativeCodeB // // On x86, each block of managed code is protected by one COMPlusFrameHandler [CPFH] (CLR's exception handler // for managed code), unlike 64bit where each frame has a personality routine attached to it. Thus, // for the example above, assume CPFH_A protects ManagedA* blocks and is setup just before the call to // ManagedA1. Likewise, CPFH_B protects ManagedB* blocks and is setup just before the call to ManagedB1. 
// // When ManagedB3 throws an exception, CPFH_B is invoked to look for a handler in all of the ManagedB* blocks. // At this point, it is setup as the "bottom-most-handler" (BMH). If no handler is found and exception reaches // ManagedA* blocks, CPFH_A is invoked to look for a handler and thus, becomes BMH. // // Thus, in the first pass on x86 for a given exception, a particular CPFH will be invoked only once when looking // for a handler and thus, registered as BMH only once. Either the exception goes unhandled and the process will // terminate or a handler will be found and second pass will commence. // // However, assume NativeCodeB had a __try/__finally and raised an exception [E1] within the __try. Let's assume // it gets caught in ManagedB1 and thus, unwind is triggered. At this point, the active exception tracker // has context about the exception thrown out of __try and CPFH_B is registered as BMH. // // If the __finally throws a new exception [E2], CPFH_B will be invoked again for first pass while looking for // a handler for the thrown exception. Since BMH is already non-NULL, we will come here since EstablisherFrame will be // the same as BMH (because EstablisherFrame will be that of CPFH_B). We will proceed to overwrite the "required" parts // of the existing exception tracker with the details of E2 (see setting of exception record and context below), erasing // any artifact of E1. // // This is unlike Scenario 1 when exception tracker is completely initialized to default values. This is also // unlike 64bit which will detect that E1 and E2 are different exceptions and hence, will setup a new tracker // to track E2, effectively behaving like Scenario 1 above. X86 cannot do this since there is no nested exception // tracker setup that gets to see the new exception. // // Thus, if E1 was a CSE and E2 isn't, we will come here and treat E2 as a CSE as well since corruption severity // is initialized as part of exception tracker initialization. Thus, E2 will start to be treated as CSE, which is // incorrect. Similar argument applies to delivery of First chance exception notification delivery. // // <QUIP> Another example why we should unify EH systems :) </QUIP> // // To address this issue, we will need to reset exception tracker here, just like the overwriting of "required" // parts of exception tracker. // If the current establisher frame is the same as the bottom-most-handler and we are here // in the first pass, assert that current exception and the one tracked by active exception tracker // are indeed different exceptions. In such a case, we must reset the exception tracker so that it can be // setup correctly further down when CEHelper::SetupCorruptionSeverityForActiveException is invoked. if ((pExInfo->m_pBottomMostHandler != NULL) && (pEstablisherFrame == pExInfo->m_pBottomMostHandler)) { // Current exception should be different from the one exception tracker is already tracking. _ASSERTE(pExceptionRecord != pExInfo->m_pExceptionRecord); // This cannot be nested exceptions - they are handled earlier (see above). 
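            // (bNestedException can only have been set on the "establisher frame below the
            // bottom-most handler" path earlier in this function, and that path does not fall
            // through to this block.)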
_ASSERTE(!bNestedException); LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: Bottom-most handler (0x%p) is the same as EstablisherFrame.\n", pExInfo->m_pBottomMostHandler)); LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: Exception record in exception tracker is 0x%p, while that of new exception is 0x%p.\n", pExInfo->m_pExceptionRecord, pExceptionRecord)); LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: Resetting exception tracker (0x%p).\n", pExInfo)); // This will reset the exception tracker state, including the corruption severity. pExInfo->Init(); } } // If we are handling a fault from managed code, we need to set the Thread->ExInfo->pContext to // the current fault context, which is used in the stack walk to get back into the managed // stack with the correct registers. (Previously, this was done by linking in a FaultingExceptionFrame // record.) // We are about to create the managed exception object, which may trigger a GC, so set this up now. pExInfo->m_pExceptionRecord = pExceptionRecord; pExInfo->m_pContext = pContext; if (pContext && ShouldHandleManagedFault(pExceptionRecord, pContext, pEstablisherFrame, pThread)) { // If this was a fault in managed code, rather than create a Frame for stackwalking, // we can use this exinfo (after all, it has all the register info.) pExInfo->m_ExceptionFlags.SetUseExInfoForStackwalk(); } // It should now be safe for a GC to happen. // case 1 & 3: this is the first time through of a new, nested, or rethrown exception, so see if we can // find a handler. Only setup throwable if are bottommost handler if (IsComPlusException(pExceptionRecord) && (!bAsynchronousThreadStop)) { // Update the throwable from the last thrown object. Note: this may cause OOM, in which case we replace // both throwables with the preallocated OOM exception. pThread->SafeSetThrowables(pThread->LastThrownObject()); // now we've got a COM+ exception, fall through to so see if we handle it STRESS_LOG3(LF_EH, LL_INFO10000, "CPFH_RealFirstPassHandler: fall through ExInfo:0x%p setting m_pBottomMostHandler to 0x%p from 0x%p\n", pExInfo, pEstablisherFrame, pExInfo->m_pBottomMostHandler); pExInfo->m_pBottomMostHandler = pEstablisherFrame; } else if (bRethrownException) { // If it was rethrown and not COM+, will still be the last one thrown. Either we threw it last and // stashed it here or someone else caught it and rethrew it, in which case it will still have been // originally stashed here. // Update the throwable from the last thrown object. Note: this may cause OOM, in which case we replace // both throwables with the preallocated OOM exception. pThread->SafeSetThrowables(pThread->LastThrownObject()); STRESS_LOG3(LF_EH, LL_INFO10000, "CPFH_RealFirstPassHandler: rethrow non-COM+ ExInfo:0x%p setting m_pBottomMostHandler to 0x%p from 0x%p\n", pExInfo, pEstablisherFrame, pExInfo->m_pBottomMostHandler); pExInfo->m_pBottomMostHandler = pEstablisherFrame; } else { if (!fIsManagedCode) { tct.bDontCatch = false; } if (exceptionCode == STATUS_BREAKPOINT) { // don't catch int 3 retval = ExceptionContinueSearch; goto exit; } // We need to set m_pBottomMostHandler here, Thread::IsExceptionInProgress returns 1. // This is a necessary part of suppressing thread abort exceptions in the constructor // of any exception object we might create. 
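        // Setting m_pBottomMostHandler before CreateCOMPlusExceptionObject below is what lets
        // Thread::IsExceptionInProgress observe the in-flight exception while the new exception
        // object is being constructed.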
STRESS_LOG3(LF_EH, LL_INFO10000, "CPFH_RealFirstPassHandler: setting ExInfo:0x%p m_pBottomMostHandler for IsExceptionInProgress to 0x%p from 0x%p\n", pExInfo, pEstablisherFrame, pExInfo->m_pBottomMostHandler); pExInfo->m_pBottomMostHandler = pEstablisherFrame; // Create the managed exception object. OBJECTREF throwable = CreateCOMPlusExceptionObject(pThread, pExceptionRecord, bAsynchronousThreadStop); // Set the throwables on the thread to the newly created object. If this fails, it will return a // preallocated exception object instead. This also updates the last thrown exception, for rethrows. throwable = pThread->SafeSetThrowables(throwable); // Set the exception code and pointers. We set these after setting the throwables on the thread, // because if the proper exception is replaced by an OOM exception, we still want the exception code // and pointers set in the OOM exception. EXCEPTIONREF exceptionRef = (EXCEPTIONREF)throwable; exceptionRef->SetXCode(pExceptionRecord->ExceptionCode); exceptionRef->SetXPtrs(&exceptionPointers); } tct.pBottomFrame = NULL; EEToProfilerExceptionInterfaceWrapper::ExceptionThrown(pThread); g_exceptionCount++; } // End of case-1-or-3 { // Allocate storage for the stack trace. OBJECTREF throwable = NULL; GCPROTECT_BEGIN(throwable); throwable = pThread->GetThrowable(); if (IsProcessCorruptedStateException(exceptionCode, throwable)) { // Failfast if exception indicates corrupted process state EEPOLICY_HANDLE_FATAL_ERROR(exceptionCode); } // If we're out of memory, then we figure there's probably not memory to maintain a stack trace, so we skip it. // If we've got a stack overflow, then we figure the stack will be so huge as to make tracking the stack trace // impracticle, so we skip it. if ((throwable == CLRException::GetPreallocatedOutOfMemoryException()) || (throwable == CLRException::GetPreallocatedStackOverflowException())) { tct.bAllowAllocMem = FALSE; } else { pExInfo->m_StackTraceInfo.AllocateStackTrace(); } GCPROTECT_END(); } // Set up information for GetExceptionPointers()/GetExceptionCode() callback. pExInfo->SetExceptionCode(pExceptionRecord); pExInfo->m_pExceptionPointers = &exceptionPointers; if (bRethrownException || bNestedException) { _ASSERTE(pExInfo->m_pPrevNestedInfo != NULL); SetStateForWatsonBucketing(bRethrownException, pExInfo->GetPreviousExceptionTracker()->GetThrowableAsHandle()); } #ifdef DEBUGGING_SUPPORTED // // At this point the exception is still fresh to us, so assert that // there should be nothing from the debugger on it. // _ASSERTE(!pExInfo->m_ExceptionFlags.DebuggerInterceptInfo()); #endif if (pThread->IsRudeAbort()) { OBJECTREF throwable = pThread->GetThrowable(); if (throwable == NULL || !IsExceptionOfType(kThreadAbortException, &throwable)) { // Neither of these sets will throw because the throwable that we're setting is a preallocated // exception. This also updates the last thrown exception, for rethrows. pThread->SafeSetThrowables(CLRException::GetBestThreadAbortException()); } if (!pThread->IsRudeAbortInitiated()) { pThread->PreWorkForThreadAbort(); } } LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: looking for handler bottom %x, top %x\n", tct.pBottomFrame, tct.pTopFrame)); tct.bReplaceStack = pExInfo->m_pBottomMostHandler == pEstablisherFrame && !bRethrownException; tct.bSkipLastElement = bRethrownException && bNestedException; found = LookForHandler(&exceptionPointers, pThread, &tct); // We have searched this far. 
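    // Record how far we searched so that a later pass over this handler picks up where this
    // search stopped -- see the tct.pTopFrame == tct.pBottomFrame check near the top of this
    // function, which avoids walking the same range of frames twice.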
pExInfo->m_pSearchBoundary = tct.pTopFrame; LOG((LF_EH, LL_INFO1000, "CPFH_RealFirstPassHandler: set pSearchBoundary to 0x%p\n", pExInfo->m_pSearchBoundary)); if ((found == LFH_NOT_FOUND) #ifdef DEBUGGING_SUPPORTED && !pExInfo->m_ExceptionFlags.DebuggerInterceptInfo() #endif ) { LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: NOT_FOUND\n")); if (tct.pTopFrame == FRAME_TOP) { LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: NOT_FOUND at FRAME_TOP\n")); } retval = ExceptionContinueSearch; goto exit; } else { // so we are going to handle the exception // Remove the nested exception record -- before calling RtlUnwind. // The second-pass callback for a NestedExceptionRecord assumes that if it's // being unwound, it should pop one exception from the pExInfo chain. This is // true for any older NestedRecords that might be unwound -- but not for the // new one we're about to add. To avoid this, we remove the new record // before calling Unwind. // // <TODO>@NICE: This can probably be a little cleaner -- the nested record currently // is also used to guard the running of the filter code. When we clean up the // behaviour of exceptions within filters, we should be able to get rid of this // PUSH/POP/PUSH behaviour.</TODO> _ASSERTE(bPopNestedHandlerExRecord); UNINSTALL_EXCEPTION_HANDLING_RECORD(&(nestedHandlerExRecord.m_ExReg)); // Since we are going to handle the exception we switch into preemptive mode GCX_PREEMP_NO_DTOR(); #ifdef DEBUGGING_SUPPORTED // // Check if the debugger wants to intercept this frame at a different point than where we are. // if (pExInfo->m_ExceptionFlags.DebuggerInterceptInfo()) { ClrDebuggerDoUnwindAndIntercept(pEstablisherFrame, pExceptionRecord); // // If this returns, then the debugger couldn't do it's stuff and we default to the found handler. // if (found == LFH_NOT_FOUND) { retval = ExceptionContinueSearch; // we need to be sure to switch back into Cooperative mode since we are going to // jump to the exit: label and follow the normal return path (it is expected that // CPFH_RealFirstPassHandler returns in COOP. GCX_PREEMP_NO_DTOR_END(); goto exit; } } #endif LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: handler found: %s\n", tct.pFunc->m_pszDebugMethodName)); CallRtlUnwindSafe(pEstablisherFrame, RtlUnwindCallback, pExceptionRecord, 0); // on x86 at least, RtlUnwind always returns // The CallRtlUnwindSafe could have popped the explicit frame that the tct.pBottomFrame points to (UMThunkPrestubHandler // does that). In such case, the tct.pBottomFrame needs to be updated to point to the first valid explicit frame. Frame* frame = pThread->GetFrame(); if ((tct.pBottomFrame != NULL) && (frame > tct.pBottomFrame)) { tct.pBottomFrame = frame; } // Note: we've completed the unwind pass up to the establisher frame, and we're headed off to finish our // cleanup and end up back in jitted code. Any more FS0 handlers pushed from this point on out will _not_ be // unwound. // Note: we are still in Preemptive mode here and that is correct, COMPlusAfterUnwind will switch us back // into Cooperative mode. return COMPlusAfterUnwind(pExceptionRecord, pEstablisherFrame, tct); } exit: { // We need to be in COOP if we get here GCX_ASSERT_COOP(); } // If we got as far as saving pExInfo, save the context pointer so it's available for the unwind. if (pExInfo) { pExInfo->m_pContext = pContext; // pExInfo->m_pExceptionPointers points to a local structure, which is now going out of scope. 
pExInfo->m_pExceptionPointers = NULL; } #if defined(USE_FEF) if (bPopFaultingExceptionFrame) { faultingExceptionFrame.Pop(); } #endif // USE_FEF if (bPopNestedHandlerExRecord) { UNINSTALL_EXCEPTION_HANDLING_RECORD(&(nestedHandlerExRecord.m_ExReg)); } return retval; } // CPFH_RealFirstPassHandler() //****************************************************************************** // void InitializeExceptionHandling() { WRAPPER_NO_CONTRACT; InitSavedExceptionInfo(); CLRAddVectoredHandlers(); // Initialize the lock used for synchronizing access to the stacktrace in the exception object g_StackTraceArrayLock.Init(LOCK_TYPE_DEFAULT, TRUE); } //****************************************************************************** static inline EXCEPTION_DISPOSITION __cdecl CPFH_FirstPassHandler(EXCEPTION_RECORD *pExceptionRecord, EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame, CONTEXT *pContext, DISPATCHER_CONTEXT *pDispatcherContext) { WRAPPER_NO_CONTRACT; EXCEPTION_DISPOSITION retval; _ASSERTE (!(pExceptionRecord->ExceptionFlags & (EXCEPTION_UNWINDING | EXCEPTION_EXIT_UNWIND))); DWORD exceptionCode = pExceptionRecord->ExceptionCode; Thread *pThread = GetThread(); STRESS_LOG4(LF_EH, LL_INFO100, "CPFH_FirstPassHandler: pEstablisherFrame = %x EH code = %x EIP = %x with ESP = %x\n", pEstablisherFrame, exceptionCode, pContext ? GetIP(pContext) : 0, pContext ? GetSP(pContext) : 0); EXCEPTION_POINTERS ptrs = { pExceptionRecord, pContext }; // Call to the vectored handler to give other parts of the Runtime a chance to jump in and take over an // exception before we do too much with it. The most important point in the vectored handler is not to toggle // the GC mode. DWORD filter = CLRVectoredExceptionHandler(&ptrs); if (filter == (DWORD) EXCEPTION_CONTINUE_EXECUTION) { return ExceptionContinueExecution; } else if (filter == EXCEPTION_CONTINUE_SEARCH) { return ExceptionContinueSearch; } #if defined(STRESS_HEAP) // // Check to see if this exception is due to GCStress. Since the GCStress mechanism only injects these faults // into managed code, we only need to check for them in CPFH_FirstPassHandler. // if (IsGcMarker(pContext, pExceptionRecord)) { return ExceptionContinueExecution; } #endif // STRESS_HEAP // We always want to be in co-operative mode when we run this function and whenever we return // from it, want to go to pre-emptive mode because are returning to OS. BOOL disabled = pThread->PreemptiveGCDisabled(); GCX_COOP_NO_DTOR(); BOOL bAsynchronousThreadStop = IsThreadHijackedForThreadStop(pThread, pExceptionRecord); if (bAsynchronousThreadStop) { // If we ever get here in preemptive mode, we're in trouble. We've // changed the thread's IP to point at a little function that throws ... if // the thread were to be in preemptive mode and a GC occurred, the stack // crawl would have been all messed up (becuase we have no frame that points // us back to the right place in managed code). _ASSERTE(disabled); AdjustContextForThreadStop(pThread, pContext); LOG((LF_EH, LL_INFO100, "CPFH_FirstPassHandler is Asynchronous Thread Stop or Abort\n")); } pThread->ResetThrowControlForThread(); CPFH_VerifyThreadIsInValidState(pThread, exceptionCode, pEstablisherFrame); // If we were in cooperative mode when we came in here, then its okay to see if we should do HandleManagedFault // and push a FaultingExceptionFrame. If we weren't in coop mode coming in here, then it means that there's no // way the exception could really be from managed code. 
It might look like it was from managed code, but in // reality it's a rethrow from unmanaged code, either unmanaged user code, or unmanaged EE implementation. if (disabled && ShouldHandleManagedFault(pExceptionRecord, pContext, pEstablisherFrame, pThread)) { #if defined(USE_FEF) HandleManagedFault(pExceptionRecord, pContext, pEstablisherFrame, pThread); retval = ExceptionContinueExecution; goto exit; #else // USE_FEF // Save the context pointer in the Thread's EXInfo, so that a stack crawl can recover the // register values from the fault. //@todo: I haven't yet found any case where we need to do anything here. If there are none, eliminate // this entire if () {} block. #endif // USE_FEF } // OK. We're finally ready to start the real work. Nobody else grabbed the exception in front of us. Now we can // get started. retval = CPFH_RealFirstPassHandler(pExceptionRecord, pEstablisherFrame, pContext, pDispatcherContext, bAsynchronousThreadStop, disabled); #if defined(USE_FEF) // This label is only used in the HandleManagedFault() case above. exit: #endif if (retval != ExceptionContinueExecution || !disabled) { GCX_PREEMP_NO_DTOR(); } STRESS_LOG1(LF_EH, LL_INFO100, "CPFH_FirstPassHandler: exiting with retval %d\n", retval); return retval; } // CPFH_FirstPassHandler() //****************************************************************************** inline void CPFH_UnwindFrames1(Thread* pThread, EXCEPTION_REGISTRATION_RECORD* pEstablisherFrame, DWORD exceptionCode) { WRAPPER_NO_CONTRACT; ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo); // Ready to unwind the stack... ThrowCallbackType tct; tct.Init(); tct.bIsUnwind = TRUE; tct.pTopFrame = GetCurrFrame(pEstablisherFrame); // highest frame to search to tct.pBottomFrame = NULL; #ifdef _DEBUG tct.pCurrentExceptionRecord = pEstablisherFrame; tct.pPrevExceptionRecord = GetPrevSEHRecord(pEstablisherFrame); #endif #ifdef DEBUGGING_SUPPORTED EXCEPTION_REGISTRATION_RECORD *pInterceptEstablisherFrame = NULL; // If the exception is intercepted, use information stored in the DebuggerExState to unwind the stack. if (pExInfo->m_ExceptionFlags.DebuggerInterceptInfo()) { pExInfo->m_DebuggerExState.GetDebuggerInterceptInfo(&pInterceptEstablisherFrame, NULL, // MethodDesc **ppFunc, NULL, // int *pdHandler, NULL, // BYTE **ppStack NULL, // ULONG_PTR *pNativeOffset, NULL // Frame **ppFrame) ); LOG((LF_EH, LL_INFO1000, "CPFH_UnwindFrames1: frames are Est 0x%X, Intercept 0x%X\n", pEstablisherFrame, pInterceptEstablisherFrame)); // // When we set up for the interception we store off the CPFH or CPNEH that we // *know* will handle unwinding the destination of the intercept. // // However, a CPNEH with the same limiting Capital-F-rame could do the work // and unwind us, so... // // If this is the exact frame handler we are supposed to search for, or // if this frame handler services the same Capital-F-rame as the frame handler // we are looking for (i.e. this frame handler may do the work that we would // expect our frame handler to do), // then // we need to pass the interception destination during this unwind.
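// (Put differently: the interception target is copied into the ThrowCallbackType below only
// when this handler -- or a handler limited by the same Frame -- is the one that will actually
// unwind down to the interception point; otherwise the unwind proceeds without it.)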
// _ASSERTE(IsUnmanagedToManagedSEHHandler(pEstablisherFrame)); if ((pEstablisherFrame == pInterceptEstablisherFrame) || (GetCurrFrame(pEstablisherFrame) == GetCurrFrame(pInterceptEstablisherFrame))) { pExInfo->m_DebuggerExState.GetDebuggerInterceptInfo(NULL, &(tct.pFunc), &(tct.dHandler), &(tct.pStack), NULL, &(tct.pBottomFrame) ); LOG((LF_EH, LL_INFO1000, "CPFH_UnwindFrames1: going to: pFunc:%#X, pStack:%#X\n", tct.pFunc, tct.pStack)); } } #endif UnwindFrames(pThread, &tct); LOG((LF_EH, LL_INFO1000, "CPFH_UnwindFrames1: after unwind ec:%#x, tct.pTopFrame:0x%p, pSearchBndry:0x%p\n" " pEstFrame:0x%p, IsC+NestExRec:%d, !Nest||Active:%d\n", exceptionCode, tct.pTopFrame, pExInfo->m_pSearchBoundary, pEstablisherFrame, IsComPlusNestedExceptionRecord(pEstablisherFrame), (!IsComPlusNestedExceptionRecord(pEstablisherFrame) || reinterpret_cast<NestedHandlerExRecord*>(pEstablisherFrame)->m_ActiveForUnwind))); if (tct.pTopFrame >= pExInfo->m_pSearchBoundary && (!IsComPlusNestedExceptionRecord(pEstablisherFrame) || reinterpret_cast<NestedHandlerExRecord*>(pEstablisherFrame)->m_ActiveForUnwind) ) { // If this is the search boundary, and we're not a nested handler, then // this is the last time we'll see this exception. Time to unwind our // exinfo. STRESS_LOG0(LF_EH, LL_INFO100, "CPFH_UnwindFrames1: Exception unwind -- unmanaged catcher detected\n"); pExInfo->UnwindExInfo((VOID*)pEstablisherFrame); } } // CPFH_UnwindFrames1() //****************************************************************************** inline EXCEPTION_DISPOSITION __cdecl CPFH_UnwindHandler(EXCEPTION_RECORD *pExceptionRecord, EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame, CONTEXT *pContext, void *pDispatcherContext) { WRAPPER_NO_CONTRACT; _ASSERTE (pExceptionRecord->ExceptionFlags & (EXCEPTION_UNWINDING | EXCEPTION_EXIT_UNWIND)); #ifdef _DEBUG // Note: you might be inclined to write "static int breakOnSecondPass = CLRConfig::GetConfigValue(...);", but // you can't do that here. That causes C++ EH to be generated under the covers for this function, and this // function isn't allowed to have any C++ EH in it because its never going to return. static int breakOnSecondPass; // = 0 static BOOL breakOnSecondPassSetup; // = FALSE if (!breakOnSecondPassSetup) { breakOnSecondPass = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_BreakOnSecondPass); breakOnSecondPassSetup = TRUE; } if (breakOnSecondPass != 0) { _ASSERTE(!"Unwind handler"); } #endif DWORD exceptionCode = pExceptionRecord->ExceptionCode; Thread *pThread = GetThread(); ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo); STRESS_LOG4(LF_EH, LL_INFO100, "In CPFH_UnwindHandler EHCode = %x EIP = %x with ESP = %x, pEstablisherFrame = 0x%p\n", exceptionCode, pContext ? GetIP(pContext) : 0, pContext ? GetSP(pContext) : 0, pEstablisherFrame); // We always want to be in co-operative mode when we run this function. Whenever we return // from it, want to go to pre-emptive mode because are returning to OS. { // needs to be in its own scope to avoid polluting the namespace, since // we don't do a _END then we don't revert the state GCX_COOP_NO_DTOR(); } CPFH_VerifyThreadIsInValidState(pThread, exceptionCode, pEstablisherFrame); if (IsComPlusNestedExceptionRecord(pEstablisherFrame)) { NestedHandlerExRecord *pHandler = reinterpret_cast<NestedHandlerExRecord*>(pEstablisherFrame); if (pHandler->m_pCurrentExInfo != NULL) { // See the comment at the end of COMPlusNestedExceptionHandler about nested exception. // OS is going to skip the EstablisherFrame before our NestedHandler. 
if (pHandler->m_pCurrentExInfo->m_pBottomMostHandler <= pHandler->m_pCurrentHandler) { // We're unwinding -- the bottom most handler is potentially off top-of-stack now. If // it is, change it to the next COM+ frame. (This one is not good, as it's about to // disappear.) EXCEPTION_REGISTRATION_RECORD *pNextBottomMost = GetNextCOMPlusSEHRecord(pHandler->m_pCurrentHandler); STRESS_LOG3(LF_EH, LL_INFO10000, "COMPlusNestedExceptionHandler: setting ExInfo:0x%p m_pBottomMostHandler from 0x%p to 0x%p\n", pHandler->m_pCurrentExInfo, pHandler->m_pCurrentExInfo->m_pBottomMostHandler, pNextBottomMost); pHandler->m_pCurrentExInfo->m_pBottomMostHandler = pNextBottomMost; } } } // this establishes a marker so can determine if are processing a nested exception // don't want to use the current frame to limit search as it could have been unwound by // the time get to nested handler (ie if find an exception, unwind to the call point and // then resume in the catch and then get another exception) so make the nested handler // have the same boundary as this one. If nested handler can't find a handler, we won't // end up searching this frame list twice because the nested handler will set the search // boundary in the thread and so if get back to this handler it will have a range that starts // and ends at the same place. NestedHandlerExRecord nestedHandlerExRecord; nestedHandlerExRecord.Init((PEXCEPTION_ROUTINE)COMPlusNestedExceptionHandler, GetCurrFrame(pEstablisherFrame)); nestedHandlerExRecord.m_ActiveForUnwind = TRUE; nestedHandlerExRecord.m_pCurrentExInfo = pExInfo; nestedHandlerExRecord.m_pCurrentHandler = pEstablisherFrame; INSTALL_EXCEPTION_HANDLING_RECORD(&(nestedHandlerExRecord.m_ExReg)); // Unwind the stack. The establisher frame sets the boundary. CPFH_UnwindFrames1(pThread, pEstablisherFrame, exceptionCode); // We're unwinding -- the bottom most handler is potentially off top-of-stack now. If // it is, change it to the next COM+ frame. (This one is not good, as it's about to // disappear.) if (pExInfo->m_pBottomMostHandler && pExInfo->m_pBottomMostHandler <= pEstablisherFrame) { EXCEPTION_REGISTRATION_RECORD *pNextBottomMost = GetNextCOMPlusSEHRecord(pEstablisherFrame); // If there is no previous COM+ SEH handler, GetNextCOMPlusSEHRecord() will return -1. Much later, we will dereference that and AV. _ASSERTE (pNextBottomMost != EXCEPTION_CHAIN_END); STRESS_LOG3(LF_EH, LL_INFO10000, "CPFH_UnwindHandler: setting ExInfo:0x%p m_pBottomMostHandler from 0x%p to 0x%p\n", pExInfo, pExInfo->m_pBottomMostHandler, pNextBottomMost); pExInfo->m_pBottomMostHandler = pNextBottomMost; } { // needs to be in its own scope to avoid polluting the namespace, since // we don't do a _END then we don't revert the state GCX_PREEMP_NO_DTOR(); } UNINSTALL_EXCEPTION_HANDLING_RECORD(&(nestedHandlerExRecord.m_ExReg)); // If we are here, then exception was not caught in managed code protected by this // ComplusFrameHandler. Hence, reset thread abort state if this is the last personality routine, // for managed code, on the stack. 
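// (Presumably this is what keeps a pending ThreadAbort from being re-raised once the exception
// has escaped every managed frame guarded by this handler; per the comment above, the reset
// only happens when this really is the last managed personality routine on the stack.)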
ResetThreadAbortState(pThread, pEstablisherFrame); STRESS_LOG0(LF_EH, LL_INFO100, "CPFH_UnwindHandler: Leaving with ExceptionContinueSearch\n"); return ExceptionContinueSearch; } // CPFH_UnwindHandler() //****************************************************************************** // This is the first handler that is called in the context of managed code // It is the first level of defense and tries to find a handler in the user // code to handle the exception //------------------------------------------------------------------------- // EXCEPTION_DISPOSITION __cdecl COMPlusFrameHandler( // EXCEPTION_RECORD *pExceptionRecord, // _EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame, // CONTEXT *pContext, // DISPATCHER_CONTEXT *pDispatcherContext) // // See http://www.microsoft.com/msj/0197/exception/exception.aspx for a background piece on Windows // unmanaged structured exception handling. EXCEPTION_HANDLER_IMPL(COMPlusFrameHandler) { WRAPPER_NO_CONTRACT; _ASSERTE(!DebugIsEECxxException(pExceptionRecord) && "EE C++ Exception leaked into managed code!"); STRESS_LOG5(LF_EH, LL_INFO100, "In COMPlusFrameHander EH code = %x flag = %x EIP = %x with ESP = %x, pEstablisherFrame = 0x%p\n", pExceptionRecord->ExceptionCode, pExceptionRecord->ExceptionFlags, pContext ? GetIP(pContext) : 0, pContext ? GetSP(pContext) : 0, pEstablisherFrame); _ASSERTE((pContext == NULL) || ((pContext->ContextFlags & CONTEXT_CONTROL) == CONTEXT_CONTROL)); if (g_fNoExceptions) return ExceptionContinueSearch; // No EH during EE shutdown. // Check if the exception represents a GCStress Marker. If it does, // we shouldnt record its entry in the TLS as such exceptions are // continuable and can confuse the VM to treat them as CSE, // as they are implemented using illegal instruction exception. bool fIsGCMarker = false; #ifdef HAVE_GCCOVER // This is a debug only macro if (GCStress<cfg_instr_jit>::IsEnabled()) { // TlsGetValue trashes last error. When Complus_GCStress=4, GC is invoked // on every allowable JITed instruction by means of our exception handling machanism // it is very easy to trash the last error. For example, a p/invoke called a native method // which sets last error. Before we getting the last error in the IL stub, it is trashed here DWORD dwLastError = GetLastError(); fIsGCMarker = IsGcMarker(pContext, pExceptionRecord); if (!fIsGCMarker) { SaveCurrentExceptionInfo(pExceptionRecord, pContext); } SetLastError(dwLastError); } else #endif { // GCStress does not exist on retail builds (see IsGcMarker implementation for details). SaveCurrentExceptionInfo(pExceptionRecord, pContext); } if (fIsGCMarker) { // If this was a GCStress marker exception, then return // ExceptionContinueExecution to the OS. return ExceptionContinueExecution; } EXCEPTION_DISPOSITION retVal = ExceptionContinueSearch; Thread *pThread = GetThread(); if ((pExceptionRecord->ExceptionFlags & (EXCEPTION_UNWINDING | EXCEPTION_EXIT_UNWIND)) == 0) { if (pExceptionRecord->ExceptionCode == STATUS_STACK_OVERFLOW) { EEPolicy::HandleStackOverflow(); // VC's unhandled exception filter plays with stack. It VirtualAlloc's a new stack, and // then launch Watson from the new stack. When Watson asks CLR to save required data, we // are not able to walk the stack. // Setting Context in ExInfo so that our Watson dump routine knows how to walk this stack. ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo); pExInfo->m_pContext = pContext; // Save the reference to the topmost handler we see during first pass when an SO goes past us. 
// When an unwind gets triggered for the exception, we will reset the frame chain when we reach // the topmost handler we saw during the first pass. // // This unifies, behaviour-wise, 32bit with 64bit. if ((pExInfo->m_pTopMostHandlerDuringSO == NULL) || (pEstablisherFrame > pExInfo->m_pTopMostHandlerDuringSO)) { pExInfo->m_pTopMostHandlerDuringSO = pEstablisherFrame; } // Switch to preemp mode since we are returning back to the OS. // We will do the quick switch since we are short of stack FastInterlockAnd (&pThread->m_fPreemptiveGCDisabled, 0); return ExceptionContinueSearch; } } else { DWORD exceptionCode = pExceptionRecord->ExceptionCode; if (exceptionCode == STATUS_UNWIND) { // If exceptionCode is STATUS_UNWIND, RtlUnwind is called with a NULL ExceptionRecord, // therefore OS uses a faked ExceptionRecord with STATUS_UNWIND code. Then we need to // look at our saved exception code. exceptionCode = GetCurrentExceptionCode(); } if (exceptionCode == STATUS_STACK_OVERFLOW) { // We saved the context during the first pass in case the stack overflow exception is // unhandled and Watson dump code needs it. Now we are in the second pass, therefore // either the exception is handled by user code, or we have finished unhandled exception // filter process, and the OS is unwinding the stack. Either way, we don't need the // context any more. It is very important to reset the context so that our code does not // accidentally walk the frame using the dangling context in ExInfoWalker::WalkToPosition. ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo); pExInfo->m_pContext = NULL; // We should have the reference to the topmost handler seen during the first pass of SO _ASSERTE(pExInfo->m_pTopMostHandlerDuringSO != NULL); // Reset frame chain till we reach the topmost establisher frame we saw in the first pass. // This will ensure that if any intermediary frame calls back into managed (e.g. native frame // containing a __finally that reverse pinvokes into managed), then we have the correct // explicit frame on the stack. Resetting the frame chain only when we reach the topmost // personality routine seen in the first pass may not result in expected behaviour, // specially during stack walks when crawl frame needs to be initialized from // explicit frame. if (pEstablisherFrame <= pExInfo->m_pTopMostHandlerDuringSO) { GCX_COOP_NO_DTOR(); if (pThread->GetFrame() < GetCurrFrame(pEstablisherFrame)) { // We are very short of stack. We avoid calling UnwindFrame which may // run unknown code here. pThread->SetFrame(GetCurrFrame(pEstablisherFrame)); } } // Switch to preemp mode since we are returning back to the OS. // We will do the quick switch since we are short of stack FastInterlockAnd(&pThread->m_fPreemptiveGCDisabled, 0); return ExceptionContinueSearch; } } if (pExceptionRecord->ExceptionFlags & (EXCEPTION_UNWINDING | EXCEPTION_EXIT_UNWIND)) { retVal = CPFH_UnwindHandler(pExceptionRecord, pEstablisherFrame, pContext, pDispatcherContext); } else { /* Make no assumptions about the current machine state. <TODO>@PERF: Only needs to be called by the very first handler invoked by SEH </TODO>*/ ResetCurrentContext(); retVal = CPFH_FirstPassHandler(pExceptionRecord, pEstablisherFrame, pContext, pDispatcherContext); } return retVal; } // COMPlusFrameHandler() //------------------------------------------------------------------------- // This is called by the EE to restore the stack pointer if necessary. 
//------------------------------------------------------------------------- // This can't be inlined into the caller to avoid introducing EH frame NOINLINE LPVOID COMPlusEndCatchWorker(Thread * pThread) { STATIC_CONTRACT_THROWS; STATIC_CONTRACT_GC_TRIGGERS; STATIC_CONTRACT_MODE_COOPERATIVE; LOG((LF_EH, LL_INFO1000, "COMPlusPEndCatch:called with " "pThread:0x%x\n",pThread)); // indicate that we are out of the managed clause as early as possible ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo); pExInfo->m_EHClauseInfo.SetManagedCodeEntered(FALSE); void* esp = NULL; // Notify the profiler that the catcher has finished running // IL stubs don't contain catch blocks so inability to perform this check does not matter. // if (!pFunc->IsILStub()) EEToProfilerExceptionInterfaceWrapper::ExceptionCatcherLeave(); // no need to set pExInfo->m_ClauseType = (DWORD)COR_PRF_CLAUSE_NONE now that the // notification is done because the ExInfo record is about to be popped off anyway LOG((LF_EH, LL_INFO1000, "COMPlusPEndCatch:pThread:0x%x\n",pThread)); #ifdef _DEBUG gLastResumedExceptionFunc = NULL; gLastResumedExceptionHandler = 0; #endif // Set the thrown object to NULL as no longer needed. This also sets the last thrown object to NULL. pThread->SafeSetThrowables(NULL); // reset the stashed exception info pExInfo->m_pExceptionRecord = NULL; pExInfo->m_pContext = NULL; pExInfo->m_pExceptionPointers = NULL; if (pExInfo->m_pShadowSP) { *pExInfo->m_pShadowSP = 0; // Reset the shadow SP } // pExInfo->m_dEsp was set in ResumeAtJITEH(). It is the Esp of the // handler nesting level which catches the exception. esp = (void*)(size_t)pExInfo->m_dEsp; pExInfo->UnwindExInfo(esp); // Prepare to sync managed exception state // // In a case when we're nested inside another catch block, the domain in which we're executing may not be the // same as the domain of the throwable that was just made the current throwable above. Therefore, we // make a special effort to preserve the domain of the throwable as we update the last thrown object. // // This function (COMPlusEndCatch) can also be called by the in-proc debugger helper thread on x86 when // an attempt to SetIP takes place to set IP outside the catch clause. In such a case, managed thread object // will not be available. Thus, we should reset the severity only if it's not such a thread. // // This behaviour (of debugger doing SetIP) is not allowed on 64bit since the catch clauses are implemented // as a separate funclet and it's just not allowed to set the IP across EH scopes, such as from inside a catch // clause to outside of the catch clause. bool fIsDebuggerHelperThread = (g_pDebugInterface == NULL) ? false : g_pDebugInterface->ThisIsHelperThread(); // Sync managed exception state, for the managed thread, based upon any active exception tracker pThread->SyncManagedExceptionState(fIsDebuggerHelperThread); LOG((LF_EH, LL_INFO1000, "COMPlusPEndCatch: esp=%p\n", esp)); return esp; } // // This function works in conjunction with JIT_EndCatch. On input, the parameters are set as follows: // ebp, ebx, edi, esi: the values of these registers at the end of the catch block // *pRetAddress: the next instruction after the call to JIT_EndCatch // // On output, *pRetAddress is the instruction at which to resume execution. This may be user code, // or it may be ThrowControlForThread (which will re-raise a pending ThreadAbortException). // // Returns the esp to set before resuming at *pRetAddress.
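// Conceptually, the jitted catch epilogue ends up doing something like the following
// (illustrative sketch only -- the real call is made from the JIT_EndCatch assembly helper,
// and the register shuffling lives there, not here; retAddrSlot is a hypothetical local):
#if 0
    LPVOID retAddrSlot = NULL;  // initially: address of the instruction after the call to JIT_EndCatch
    LPVOID newEsp = COMPlusEndCatch(ebp, ebx, edi, esi, &retAddrSlot);
    // ...then switch the stack pointer to newEsp and jump to retAddrSlot, which may have been
    // redirected to ThrowControlForThread if a thread abort is pending.
#endif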
// LPVOID STDCALL COMPlusEndCatch(LPVOID ebp, DWORD ebx, DWORD edi, DWORD esi, LPVOID* pRetAddress) { // // PopNestedExceptionRecords directly manipulates fs:[0] chain. This method can't have any EH! // STATIC_CONTRACT_THROWS; STATIC_CONTRACT_GC_TRIGGERS; STATIC_CONTRACT_MODE_COOPERATIVE; ETW::ExceptionLog::ExceptionCatchEnd(); ETW::ExceptionLog::ExceptionThrownEnd(); void* esp = COMPlusEndCatchWorker(GetThread()); // We are going to resume at a handler nesting level whose esp is dEsp. Pop off any SEH records below it. This // would be the COMPlusNestedExceptionHandler we had inserted. PopNestedExceptionRecords(esp); // // Set up m_OSContext for the call to COMPlusCheckForAbort // Thread* pThread = GetThread(); SetIP(pThread->m_OSContext, (PCODE)*pRetAddress); SetSP(pThread->m_OSContext, (TADDR)esp); SetFP(pThread->m_OSContext, (TADDR)ebp); pThread->m_OSContext->Ebx = ebx; pThread->m_OSContext->Edi = edi; pThread->m_OSContext->Esi = esi; LPVOID throwControl = COMPlusCheckForAbort((UINT_PTR)*pRetAddress); if (throwControl) *pRetAddress = throwControl; return esp; } PEXCEPTION_REGISTRATION_RECORD GetCurrentSEHRecord() { WRAPPER_NO_CONTRACT; LPVOID fs0 = (LPVOID)__readfsdword(0); #if 0 // This walk is too expensive considering we hit it every time we a CONTRACT(NOTHROW) #ifdef _DEBUG EXCEPTION_REGISTRATION_RECORD *pEHR = (EXCEPTION_REGISTRATION_RECORD *)fs0; LPVOID spVal; __asm { mov spVal, esp } // check that all the eh frames are all greater than the current stack value. If not, the // stack has been updated somehow w/o unwinding the SEH chain. // LOG((LF_EH, LL_INFO1000000, "ER Chain:\n")); while (pEHR != NULL && pEHR != EXCEPTION_CHAIN_END) { // LOG((LF_EH, LL_INFO1000000, "\tp: prev:p handler:%x\n", pEHR, pEHR->Next, pEHR->Handler)); if (pEHR < spVal) { if (gLastResumedExceptionFunc != 0) _ASSERTE(!"Stack is greater than start of SEH chain - possible missing leave in handler. 
See gLastResumedExceptionHandler & gLastResumedExceptionFunc for info"); else _ASSERTE(!"Stack is greater than start of SEH chain (FS:0)"); } if (pEHR->Handler == (void *)-1) _ASSERTE(!"Handler value has been corrupted"); _ASSERTE(pEHR < pEHR->Next); pEHR = pEHR->Next; } #endif #endif // 0 return (EXCEPTION_REGISTRATION_RECORD*) fs0; } PEXCEPTION_REGISTRATION_RECORD GetFirstCOMPlusSEHRecord(Thread *pThread) { WRAPPER_NO_CONTRACT; EXCEPTION_REGISTRATION_RECORD *pEHR = *(pThread->GetExceptionListPtr()); if (pEHR == EXCEPTION_CHAIN_END || IsUnmanagedToManagedSEHHandler(pEHR)) { return pEHR; } else { return GetNextCOMPlusSEHRecord(pEHR); } } PEXCEPTION_REGISTRATION_RECORD GetPrevSEHRecord(EXCEPTION_REGISTRATION_RECORD *next) { WRAPPER_NO_CONTRACT; _ASSERTE(IsUnmanagedToManagedSEHHandler(next)); EXCEPTION_REGISTRATION_RECORD *pEHR = GetCurrentSEHRecord(); _ASSERTE(pEHR != 0 && pEHR != EXCEPTION_CHAIN_END); EXCEPTION_REGISTRATION_RECORD *pBest = 0; while (pEHR != next) { if (IsUnmanagedToManagedSEHHandler(pEHR)) pBest = pEHR; pEHR = pEHR->Next; _ASSERTE(pEHR != 0 && pEHR != EXCEPTION_CHAIN_END); } return pBest; } VOID SetCurrentSEHRecord(EXCEPTION_REGISTRATION_RECORD *pSEH) { WRAPPER_NO_CONTRACT; *GetThread()->GetExceptionListPtr() = pSEH; } // Note that this logic is copied below, in PopSEHRecords __declspec(naked) VOID __cdecl PopSEHRecords(LPVOID pTargetSP) { // No CONTRACT possible on naked functions STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; __asm{ mov ecx, [esp+4] ;; ecx <- pTargetSP mov eax, fs:[0] ;; get current SEH record poploop: cmp eax, ecx jge done mov eax, [eax] ;; get next SEH record jmp poploop done: mov fs:[0], eax retn } } // // Unwind pExinfo, pops FS:[0] handlers until the interception context SP, and // resumes at interception context. // VOID UnwindExceptionTrackerAndResumeInInterceptionFrame(ExInfo* pExInfo, EHContext* context) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_MODE_COOPERATIVE; _ASSERTE(pExInfo && context); pExInfo->UnwindExInfo((LPVOID)(size_t)context->Esp); PopNestedExceptionRecords((LPVOID)(size_t)context->Esp); STRESS_LOG3(LF_EH|LF_CORDB, LL_INFO100, "UnwindExceptionTrackerAndResumeInInterceptionFrame: completing intercept at EIP = %p ESP = %p EBP = %p\n", context->Eip, context->Esp, context->Ebp); ResumeAtJitEHHelper(context); UNREACHABLE_MSG("Should never return from ResumeAtJitEHHelper!"); } // // Pop SEH records below the given target ESP. This is only used to pop nested exception records. // If bCheckForUnknownHandlers is set, it only checks for unknown FS:[0] handlers. // BOOL PopNestedExceptionRecords(LPVOID pTargetSP, BOOL bCheckForUnknownHandlers) { // No CONTRACT here, because we can't run the risk of it pushing any SEH into the current method. STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; PEXCEPTION_REGISTRATION_RECORD pEHR = GetCurrentSEHRecord(); while ((LPVOID)pEHR < pTargetSP) { // // The only handler types we're allowed to have below the limit on the FS:0 chain in these cases are a // nested exception record or a fast NExport record, so we verify that here. // // There is a special case, of course: for an unhandled exception, when the default handler does the exit // unwind, we may have an exception that escapes a finally clause, thus replacing the original unhandled // exception. If we find a catcher for that new exception, then we'll go ahead and do our own unwind, then // jump to the catch. 
When we are called here, just before jumpping to the catch, we'll pop off our nested // handlers, then we'll pop off one more handler: the handler that ntdll!ExecuteHandler2 pushed before // calling our nested handler. We go ahead and pop off that handler, too. Its okay, its only there to catch // exceptions from handlers and turn them into collided unwind status codes... there's no cleanup in the // handler that we're removing, and that's the important point. The handler that ExecuteHandler2 pushes // isn't a public export from ntdll, but its named "UnwindHandler" and is physically shortly after // ExecuteHandler2 in ntdll. // In this case, we don't want to pop off the NExportSEH handler since it's our outermost handler. // static HINSTANCE ExecuteHandler2Module = 0; static BOOL ExecuteHandler2ModuleInited = FALSE; // Cache the handle to the dll with the handler pushed by ExecuteHandler2. if (!ExecuteHandler2ModuleInited) { ExecuteHandler2Module = WszGetModuleHandle(W("ntdll.dll")); ExecuteHandler2ModuleInited = TRUE; } if (bCheckForUnknownHandlers) { if (!IsComPlusNestedExceptionRecord(pEHR) || !((ExecuteHandler2Module != NULL) && IsIPInModule(ExecuteHandler2Module, (PCODE)pEHR->Handler))) { return TRUE; } } #ifdef _DEBUG else { // Note: if we can't find the module containing ExecuteHandler2, we'll just be really strict and require // that we're only popping nested handlers or the FastNExportSEH handler. _ASSERTE(FastNExportSEH(pEHR) || IsComPlusNestedExceptionRecord(pEHR) || ((ExecuteHandler2Module != NULL) && IsIPInModule(ExecuteHandler2Module, (PCODE)pEHR->Handler))); } #endif // _DEBUG pEHR = pEHR->Next; } if (!bCheckForUnknownHandlers) { SetCurrentSEHRecord(pEHR); } return FALSE; } // // This is implemented differently from the PopNestedExceptionRecords above because it's called in the context of // the DebuggerRCThread to operate on the stack of another thread. // VOID PopNestedExceptionRecords(LPVOID pTargetSP, CONTEXT *pCtx, void *pSEH) { // No CONTRACT here, because we can't run the risk of it pushing any SEH into the current method. STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; #ifdef _DEBUG LOG((LF_CORDB,LL_INFO1000, "\nPrintSEHRecords:\n")); EXCEPTION_REGISTRATION_RECORD *pEHR = (EXCEPTION_REGISTRATION_RECORD *)(size_t)*(DWORD *)pSEH; // check that all the eh frames are all greater than the current stack value. If not, the // stack has been updated somehow w/o unwinding the SEH chain. while (pEHR != NULL && pEHR != EXCEPTION_CHAIN_END) { LOG((LF_EH, LL_INFO1000000, "\t%08x: next:%08x handler:%x\n", pEHR, pEHR->Next, pEHR->Handler)); pEHR = pEHR->Next; } #endif DWORD dwCur = *(DWORD*)pSEH; // 'EAX' in the original routine DWORD dwPrev = (DWORD)(size_t)pSEH; while (dwCur < (DWORD)(size_t)pTargetSP) { // Watch for the OS handler // for nested exceptions, or any C++ handlers for destructors in our call // stack, or anything else. if (dwCur < (DWORD)GetSP(pCtx)) dwPrev = dwCur; dwCur = *(DWORD *)(size_t)dwCur; LOG((LF_CORDB,LL_INFO10000, "dwCur: 0x%x dwPrev:0x%x pTargetSP:0x%x\n", dwCur, dwPrev, pTargetSP)); } *(DWORD *)(size_t)dwPrev = dwCur; #ifdef _DEBUG pEHR = (EXCEPTION_REGISTRATION_RECORD *)(size_t)*(DWORD *)pSEH; // check that all the eh frames are all greater than the current stack value. If not, the // stack has been updated somehow w/o unwinding the SEH chain. 
LOG((LF_CORDB,LL_INFO1000, "\nPopSEHRecords:\n")); while (pEHR != NULL && pEHR != (void *)-1) { LOG((LF_EH, LL_INFO1000000, "\t%08x: next:%08x handler:%x\n", pEHR, pEHR->Next, pEHR->Handler)); pEHR = pEHR->Next; } #endif } //========================================================================== // COMPlusThrowCallback // //========================================================================== /* * * COMPlusThrowCallbackHelper * * This function is a simple helper function for COMPlusThrowCallback. It is needed * because of the EX_TRY macro. This macro does an alloca(), which allocates space * off the stack, not free'ing it. Thus, doing a EX_TRY in a loop can easily result * in a stack overflow error. By factoring out the EX_TRY into a separate function, * we recover that stack space. * * Parameters: * pJitManager - The JIT manager that will filter the EH. * pCf - The frame to crawl. * EHClausePtr * nestingLevel * pThread - Used to determine if the thread is throwable or not. * * Return: * Exception status. * */ int COMPlusThrowCallbackHelper(IJitManager *pJitManager, CrawlFrame *pCf, ThrowCallbackType* pData, EE_ILEXCEPTION_CLAUSE *EHClausePtr, DWORD nestingLevel, OBJECTREF throwable, Thread *pThread ) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; int iFilt = 0; EX_TRY { GCPROTECT_BEGIN (throwable); // We want to call filters even if the thread is aborting, so suppress abort // checks while the filter runs. ThreadPreventAsyncHolder preventAbort; BYTE* startAddress = (BYTE*)pCf->GetCodeInfo()->GetStartAddress(); iFilt = ::CallJitEHFilter(pCf, startAddress, EHClausePtr, nestingLevel, throwable); GCPROTECT_END(); } EX_CATCH { // We had an exception in filter invocation that remained unhandled. // Sync managed exception state, for the managed thread, based upon the active exception tracker. pThread->SyncManagedExceptionState(false); // // Swallow exception. Treat as exception continue search. // iFilt = EXCEPTION_CONTINUE_SEARCH; } EX_END_CATCH(SwallowAllExceptions) return iFilt; } //****************************************************************************** // The stack walk callback for exception handling on x86. // Returns one of: // SWA_CONTINUE = 0, // continue walking // SWA_ABORT = 1, // stop walking, early out in "failure case" // SWA_FAILED = 2 // couldn't walk stack StackWalkAction COMPlusThrowCallback( // SWA value CrawlFrame *pCf, // Data from StackWalkFramesEx ThrowCallbackType *pData) // Context data passed through from CPFH { // We don't want to use a runtime contract here since this codepath is used during // the processing of a hard SO. Contracts use a significant amount of stack // which we can't afford for those cases. 
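// (The contracts below are the STATIC_ variants for the same stack-consumption reason.)
//
// Roughly, this callback does three things per managed frame: append the frame to the stack
// trace, deliver the debugger/profiler/first-chance notifications, and scan the method's EH
// clause table for a filter or typed handler whose try range covers the faulting offset.
// The range test used in the clause loop is, in essence:
//
//     covered = (offs > TryStartPC || (offs == TryStartPC && !start_adjust))
//            && (offs < TryEndPC   || (offs == TryEndPC   && !end_adjust))
//
// (a paraphrase of the checks below, kept here only as a reading aid).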
STATIC_CONTRACT_THROWS; STATIC_CONTRACT_GC_TRIGGERS; STATIC_CONTRACT_MODE_COOPERATIVE; Frame *pFrame = pCf->GetFrame(); MethodDesc *pFunc = pCf->GetFunction(); #if defined(_DEBUG) #define METHODNAME(pFunc) (pFunc?pFunc->m_pszDebugMethodName:"<n/a>") #else #define METHODNAME(pFunc) "<n/a>" #endif STRESS_LOG4(LF_EH, LL_INFO100, "COMPlusThrowCallback: STACKCRAWL method:%pM ('%s'), Frame:%p, FrameVtable = %pV\n", pFunc, METHODNAME(pFunc), pFrame, pCf->IsFrameless()?0:(*(void**)pFrame)); #undef METHODNAME Thread *pThread = GetThread(); if (pFrame && pData->pTopFrame == pFrame) /* Don't look past limiting frame if there is one */ return SWA_ABORT; if (!pFunc) return SWA_CONTINUE; if (pThread->IsRudeAbortInitiated()) { return SWA_CONTINUE; } ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo); _ASSERTE(!pData->bIsUnwind); #ifdef _DEBUG // It SHOULD be the case that any frames we consider live between this exception // record and the previous one. if (!pExInfo->m_pPrevNestedInfo) { if (pData->pCurrentExceptionRecord) { if (pFrame) _ASSERTE(pData->pCurrentExceptionRecord > pFrame); // The FastNExport SEH handler can be in the frame we just unwound and as a result just out of range. if (pCf->IsFrameless() && !FastNExportSEH((PEXCEPTION_REGISTRATION_RECORD)pData->pCurrentExceptionRecord)) { _ASSERTE((ULONG_PTR)pData->pCurrentExceptionRecord >= GetRegdisplaySP(pCf->GetRegisterSet())); } } if (pData->pPrevExceptionRecord) { // FCALLS have an extra SEH record in debug because of the desctructor // associated with ForbidGC checking. This is benign, so just ignore it. if (pFrame) _ASSERTE(pData->pPrevExceptionRecord < pFrame || pFrame->GetVTablePtr() == HelperMethodFrame::GetMethodFrameVPtr()); if (pCf->IsFrameless()) _ASSERTE((ULONG_PTR)pData->pPrevExceptionRecord <= GetRegdisplaySP(pCf->GetRegisterSet())); } } #endif UINT_PTR currentIP = 0; UINT_PTR currentSP = 0; if (pCf->IsFrameless()) { currentIP = (UINT_PTR)GetControlPC(pCf->GetRegisterSet()); currentSP = (UINT_PTR)GetRegdisplaySP(pCf->GetRegisterSet()); } else if (InlinedCallFrame::FrameHasActiveCall(pFrame)) { // don't have the IP, SP for native code currentIP = 0; currentSP = 0; } else { currentIP = (UINT_PTR)(pCf->GetFrame()->GetIP()); currentSP = 0; //Don't have an SP to get. } if (!pFunc->IsILStub()) { // Append the current frame to the stack trace and save the save trace to the managed Exception object. pExInfo->m_StackTraceInfo.AppendElement(pData->bAllowAllocMem, currentIP, currentSP, pFunc, pCf); pExInfo->m_StackTraceInfo.SaveStackTrace(pData->bAllowAllocMem, pThread->GetThrowableAsHandle(), pData->bReplaceStack, pData->bSkipLastElement); } else { LOG((LF_EH, LL_INFO1000, "COMPlusThrowCallback: Skipping AppendElement/SaveStackTrace for IL stub MD %p\n", pFunc)); } // Fire an exception thrown ETW event when an exception occurs ETW::ExceptionLog::ExceptionThrown(pCf, pData->bSkipLastElement, pData->bReplaceStack); // Reset the flags. These flags are set only once before each stack walk done by LookForHandler(), and // they apply only to the first frame we append to the stack trace. Subsequent frames are always appended. if (pData->bReplaceStack) { pData->bReplaceStack = FALSE; } if (pData->bSkipLastElement) { pData->bSkipLastElement = FALSE; } // now we've got the stack trace, if we aren't allowed to catch this and we're first pass, return if (pData->bDontCatch) return SWA_CONTINUE; if (!pCf->IsFrameless()) { // @todo - remove this once SIS is fully enabled. 
extern bool g_EnableSIS; if (g_EnableSIS) { // For debugger, we may want to notify 1st chance exceptions if they're coming out of a stub. // We recognize stubs as Frames with a M2U transition type. The debugger's stackwalker also // recognizes these frames and publishes ICorDebugInternalFrames in the stackwalk. It's // important to use pFrame as the stack address so that the Exception callback matches up // w/ the ICorDebugInternlFrame stack range. if (CORDebuggerAttached()) { Frame * pFrameStub = pCf->GetFrame(); Frame::ETransitionType t = pFrameStub->GetTransitionType(); if (t == Frame::TT_M2U) { // Use address of the frame as the stack address. currentSP = (SIZE_T) ((void*) pFrameStub); currentIP = 0; // no IP. EEToDebuggerExceptionInterfaceWrapper::FirstChanceManagedException(pThread, (SIZE_T)currentIP, (SIZE_T)currentSP); // Deliver the FirstChanceNotification after the debugger, if not already delivered. if (!pExInfo->DeliveredFirstChanceNotification()) { ExceptionNotifications::DeliverFirstChanceNotification(); } } } } return SWA_CONTINUE; } bool fIsILStub = pFunc->IsILStub(); bool fGiveDebuggerAndProfilerNotification = !fIsILStub; BOOL fMethodCanHandleException = TRUE; MethodDesc * pUserMDForILStub = NULL; Frame * pILStubFrame = NULL; if (fIsILStub) pUserMDForILStub = GetUserMethodForILStub(pThread, currentSP, pFunc, &pILStubFrame); // Let the profiler know that we are searching for a handler within this function instance if (fGiveDebuggerAndProfilerNotification) EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFunctionEnter(pFunc); // The following debugger notification and AppDomain::FirstChanceNotification should be scoped together // since the AD notification *must* follow immediately after the debugger's notification. { #ifdef DEBUGGING_SUPPORTED // // Go ahead and notify any debugger of this exception. // EEToDebuggerExceptionInterfaceWrapper::FirstChanceManagedException(pThread, (SIZE_T)currentIP, (SIZE_T)currentSP); if (CORDebuggerAttached() && pExInfo->m_ExceptionFlags.DebuggerInterceptInfo()) { return SWA_ABORT; } #endif // DEBUGGING_SUPPORTED // Attempt to deliver the first chance notification to the AD only *AFTER* the debugger // has done that, provided we have not already done that. if (!pExInfo->DeliveredFirstChanceNotification()) { ExceptionNotifications::DeliverFirstChanceNotification(); } } IJitManager* pJitManager = pCf->GetJitManager(); _ASSERTE(pJitManager); EH_CLAUSE_ENUMERATOR pEnumState; unsigned EHCount = pJitManager->InitializeEHEnumeration(pCf->GetMethodToken(), &pEnumState); if (EHCount == 0) { // Inform the profiler that we're leaving, and what pass we're on if (fGiveDebuggerAndProfilerNotification) EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFunctionLeave(pFunc); return SWA_CONTINUE; } TypeHandle thrownType = TypeHandle(); // if we are being called on an unwind for an exception that we did not try to catch, eg. 
// an internal EE exception, then pThread->GetThrowable will be null { OBJECTREF throwable = pThread->GetThrowable(); if (throwable != NULL) { throwable = PossiblyUnwrapThrowable(throwable, pCf->GetAssembly()); thrownType = TypeHandle(throwable->GetMethodTable()); } } PREGDISPLAY regs = pCf->GetRegisterSet(); BYTE *pStack = (BYTE *) GetRegdisplaySP(regs); #ifdef DEBUGGING_SUPPORTED BYTE *pHandlerEBP = (BYTE *) GetRegdisplayFP(regs); #endif DWORD offs = (DWORD)pCf->GetRelOffset(); //= (BYTE*) (*regs->pPC) - (BYTE*) pCf->GetStartAddress(); STRESS_LOG1(LF_EH, LL_INFO10000, "COMPlusThrowCallback: offset is %d\n", offs); EE_ILEXCEPTION_CLAUSE EHClause; unsigned start_adjust, end_adjust; start_adjust = !(pCf->HasFaulted() || pCf->IsIPadjusted()); end_adjust = pCf->IsActiveFunc(); for(ULONG i=0; i < EHCount; i++) { pJitManager->GetNextEHClause(&pEnumState, &EHClause); _ASSERTE(IsValidClause(&EHClause)); STRESS_LOG4(LF_EH, LL_INFO100, "COMPlusThrowCallback: considering '%s' clause [%d,%d], ofs:%d\n", (IsFault(&EHClause) ? "fault" : ( IsFinally(&EHClause) ? "finally" : ( IsFilterHandler(&EHClause) ? "filter" : ( IsTypedHandler(&EHClause) ? "typed" : "unknown")))), EHClause.TryStartPC, EHClause.TryEndPC, offs ); // Checking the exception range is a bit tricky because // on CPU faults (null pointer access, div 0, ..., the IP points // to the faulting instruction, but on calls, the IP points // to the next instruction. // This means that we should not include the start point on calls // as this would be a call just preceding the try block. // Also, we should include the end point on calls, but not faults. // If we're in the FILTER part of a filter clause, then we // want to stop crawling. It's going to be caught in a // EX_CATCH just above us. If not, the exception if ( IsFilterHandler(&EHClause) && ( offs > EHClause.FilterOffset || (offs == EHClause.FilterOffset && !start_adjust) ) && ( offs < EHClause.HandlerStartPC || (offs == EHClause.HandlerStartPC && !end_adjust) )) { STRESS_LOG4(LF_EH, LL_INFO100, "COMPlusThrowCallback: Fault inside filter [%d,%d] startAdj %d endAdj %d\n", EHClause.FilterOffset, EHClause.HandlerStartPC, start_adjust, end_adjust); if (fGiveDebuggerAndProfilerNotification) EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFunctionLeave(pFunc); return SWA_ABORT; } if ( (offs < EHClause.TryStartPC) || (offs > EHClause.TryEndPC) || (offs == EHClause.TryStartPC && start_adjust) || (offs == EHClause.TryEndPC && end_adjust)) continue; BOOL typeMatch = FALSE; BOOL isTypedHandler = IsTypedHandler(&EHClause); if (isTypedHandler && !thrownType.IsNull()) { if (EHClause.TypeHandle == (void*)(size_t)mdTypeRefNil) { // this is a catch(...) typeMatch = TRUE; } else { TypeHandle exnType = pJitManager->ResolveEHClause(&EHClause,pCf); // if doesn't have cached class then class wasn't loaded so couldn't have been thrown typeMatch = !exnType.IsNull() && ExceptionIsOfRightType(exnType, thrownType); } } // <TODO>@PERF: Is this too expensive? Consider storing the nesting level // instead of the HandlerEndPC.</TODO> // Determine the nesting level of EHClause. Just walk the table // again, and find out how many handlers enclose it DWORD nestingLevel = 0; if (IsFaultOrFinally(&EHClause)) continue; if (isTypedHandler) { LOG((LF_EH, LL_INFO100, "COMPlusThrowCallback: %s match for typed handler.\n", typeMatch?"Found":"Did not find")); if (!typeMatch) { continue; } } else { // Must be an exception filter (__except() part of __try{}__except(){}). 
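// For a filter clause the filter code itself runs right here during the first pass
// (COMPlusThrowCallbackHelper -> CallJitEHFilter below); only a return value of
// EXCEPTION_EXECUTE_HANDLER makes this frame/clause the recorded catch target.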
nestingLevel = ComputeEnclosingHandlerNestingLevel(pJitManager, pCf->GetMethodToken(), EHClause.HandlerStartPC); // We just need *any* address within the method. This will let the debugger // resolve the EnC version of the method. PCODE pMethodAddr = GetControlPC(regs); if (fGiveDebuggerAndProfilerNotification) EEToDebuggerExceptionInterfaceWrapper::ExceptionFilter(pFunc, pMethodAddr, EHClause.FilterOffset, pHandlerEBP); UINT_PTR uStartAddress = (UINT_PTR)pCf->GetCodeInfo()->GetStartAddress(); // save clause information in the exinfo pExInfo->m_EHClauseInfo.SetInfo(COR_PRF_CLAUSE_FILTER, uStartAddress + EHClause.FilterOffset, StackFrame((UINT_PTR)pHandlerEBP)); // Let the profiler know we are entering a filter if (fGiveDebuggerAndProfilerNotification) EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFilterEnter(pFunc); STRESS_LOG3(LF_EH, LL_INFO10, "COMPlusThrowCallback: calling filter code, EHClausePtr:%08x, Start:%08x, End:%08x\n", &EHClause, EHClause.HandlerStartPC, EHClause.HandlerEndPC); OBJECTREF throwable = PossiblyUnwrapThrowable(pThread->GetThrowable(), pCf->GetAssembly()); pExInfo->m_EHClauseInfo.SetManagedCodeEntered(TRUE); int iFilt = COMPlusThrowCallbackHelper(pJitManager, pCf, pData, &EHClause, nestingLevel, throwable, pThread); pExInfo->m_EHClauseInfo.SetManagedCodeEntered(FALSE); // Let the profiler know we are leaving a filter if (fGiveDebuggerAndProfilerNotification) EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFilterLeave(); pExInfo->m_EHClauseInfo.ResetInfo(); if (pThread->IsRudeAbortInitiated()) { if (fGiveDebuggerAndProfilerNotification) EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFunctionLeave(pFunc); return SWA_CONTINUE; } // If this filter didn't want the exception, keep looking. if (EXCEPTION_EXECUTE_HANDLER != iFilt) continue; } // Record this location, to stop the unwind phase, later. pData->pFunc = pFunc; pData->dHandler = i; pData->pStack = pStack; // Notify the profiler that a catcher has been found if (fGiveDebuggerAndProfilerNotification) { EEToProfilerExceptionInterfaceWrapper::ExceptionSearchCatcherFound(pFunc); EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFunctionLeave(pFunc); } #ifdef DEBUGGING_SUPPORTED // // Notify debugger that a catcher has been found. // if (fIsILStub) { EEToDebuggerExceptionInterfaceWrapper::NotifyOfCHFFilter(pExInfo->m_pExceptionPointers, pILStubFrame); } else if (fGiveDebuggerAndProfilerNotification && CORDebuggerAttached() && !pExInfo->m_ExceptionFlags.DebuggerInterceptInfo()) { _ASSERTE(pData); // We just need *any* address within the method. This will let the debugger // resolve the EnC version of the method. PCODE pMethodAddr = GetControlPC(regs); EEToDebuggerExceptionInterfaceWrapper::FirstChanceManagedExceptionCatcherFound(pThread, pData->pFunc, pMethodAddr, (SIZE_T)pData->pStack, &EHClause); } #endif // DEBUGGING_SUPPORTED return SWA_ABORT; } if (fGiveDebuggerAndProfilerNotification) EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFunctionLeave(pFunc); return SWA_CONTINUE; } // StackWalkAction COMPlusThrowCallback() //========================================================================== // COMPlusUnwindCallback //========================================================================== #if defined(_MSC_VER) #pragma warning(push) #pragma warning (disable : 4740) // There is inline asm code in this function, which disables // global optimizations. 
#pragma warning (disable : 4731) #endif StackWalkAction COMPlusUnwindCallback (CrawlFrame *pCf, ThrowCallbackType *pData) { STATIC_CONTRACT_THROWS; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_MODE_COOPERATIVE; _ASSERTE(pData->bIsUnwind); Frame *pFrame = pCf->GetFrame(); MethodDesc *pFunc = pCf->GetFunction(); #if defined(_DEBUG) #define METHODNAME(pFunc) (pFunc?pFunc->m_pszDebugMethodName:"<n/a>") #else #define METHODNAME(pFunc) "<n/a>" #endif STRESS_LOG4(LF_EH, LL_INFO100, "COMPlusUnwindCallback: STACKCRAWL method:%pM ('%s'), Frame:%p, FrameVtable = %pV\n", pFunc, METHODNAME(pFunc), pFrame, pCf->IsFrameless()?0:(*(void**)pFrame)); #undef METHODNAME if (pFrame && pData->pTopFrame == pFrame) /* Don't look past limiting frame if there is one */ return SWA_ABORT; if (!pFunc) return SWA_CONTINUE; if (!pCf->IsFrameless()) return SWA_CONTINUE; Thread *pThread = GetThread(); // If the thread is being RudeAbort, we will not run any finally if (pThread->IsRudeAbortInitiated()) { return SWA_CONTINUE; } IJitManager* pJitManager = pCf->GetJitManager(); _ASSERTE(pJitManager); ExInfo *pExInfo = &(pThread->GetExceptionState()->m_currentExInfo); PREGDISPLAY regs = pCf->GetRegisterSet(); BYTE *pStack = (BYTE *) GetRegdisplaySP(regs); TypeHandle thrownType = TypeHandle(); #ifdef DEBUGGING_SUPPORTED LOG((LF_EH, LL_INFO1000, "COMPlusUnwindCallback: Intercept %d, pData->pFunc 0x%X, pFunc 0x%X, pData->pStack 0x%X, pStack 0x%X\n", pExInfo->m_ExceptionFlags.DebuggerInterceptInfo(), pData->pFunc, pFunc, pData->pStack, pStack)); // // If the debugger wants to intercept this exception here, go do that. // if (pExInfo->m_ExceptionFlags.DebuggerInterceptInfo() && (pData->pFunc == pFunc) && (pData->pStack == pStack)) { goto LDoDebuggerIntercept; } #endif bool fGiveDebuggerAndProfilerNotification; fGiveDebuggerAndProfilerNotification = !pFunc->IsILStub(); // Notify the profiler of the function we're dealing with in the unwind phase if (fGiveDebuggerAndProfilerNotification) EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFunctionEnter(pFunc); EH_CLAUSE_ENUMERATOR pEnumState; unsigned EHCount = pJitManager->InitializeEHEnumeration(pCf->GetMethodToken(), &pEnumState); if (EHCount == 0) { // Inform the profiler that we're leaving, and what pass we're on if (fGiveDebuggerAndProfilerNotification) EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFunctionLeave(pFunc); return SWA_CONTINUE; } // if we are being called on an unwind for an exception that we did not try to catch, eg. // an internal EE exception, then pThread->GetThrowable will be null { OBJECTREF throwable = pThread->GetThrowable(); if (throwable != NULL) { throwable = PossiblyUnwrapThrowable(throwable, pCf->GetAssembly()); thrownType = TypeHandle(throwable->GetMethodTable()); } } #ifdef DEBUGGING_SUPPORTED BYTE *pHandlerEBP; pHandlerEBP = (BYTE *) GetRegdisplayFP(regs); #endif DWORD offs; offs = (DWORD)pCf->GetRelOffset(); //= (BYTE*) (*regs->pPC) - (BYTE*) pCf->GetStartAddress(); LOG((LF_EH, LL_INFO100, "COMPlusUnwindCallback: current EIP offset in method 0x%x, \n", offs)); EE_ILEXCEPTION_CLAUSE EHClause; unsigned start_adjust, end_adjust; start_adjust = !(pCf->HasFaulted() || pCf->IsIPadjusted()); end_adjust = pCf->IsActiveFunc(); for(ULONG i=0; i < EHCount; i++) { pJitManager->GetNextEHClause(&pEnumState, &EHClause); _ASSERTE(IsValidClause(&EHClause)); STRESS_LOG4(LF_EH, LL_INFO100, "COMPlusUnwindCallback: considering '%s' clause [%d,%d], offs:%d\n", (IsFault(&EHClause) ? "fault" : ( IsFinally(&EHClause) ? "finally" : ( IsFilterHandler(&EHClause) ? 
"filter" : ( IsTypedHandler(&EHClause) ? "typed" : "unknown")))), EHClause.TryStartPC, EHClause.TryEndPC, offs ); // Checking the exception range is a bit tricky because // on CPU faults (null pointer access, div 0, ..., the IP points // to the faulting instruction, but on calls, the IP points // to the next instruction. // This means that we should not include the start point on calls // as this would be a call just preceding the try block. // Also, we should include the end point on calls, but not faults. if ( IsFilterHandler(&EHClause) && ( offs > EHClause.FilterOffset || (offs == EHClause.FilterOffset && !start_adjust) ) && ( offs < EHClause.HandlerStartPC || (offs == EHClause.HandlerStartPC && !end_adjust) ) ) { STRESS_LOG4(LF_EH, LL_INFO100, "COMPlusUnwindCallback: Fault inside filter [%d,%d] startAdj %d endAdj %d\n", EHClause.FilterOffset, EHClause.HandlerStartPC, start_adjust, end_adjust); // Make the filter as done. See comment in CallJitEHFilter // on why we have to do it here. Frame* pFilterFrame = pThread->GetFrame(); _ASSERTE(pFilterFrame->GetVTablePtr() == ExceptionFilterFrame::GetMethodFrameVPtr()); ((ExceptionFilterFrame*)pFilterFrame)->SetFilterDone(); // Inform the profiler that we're leaving, and what pass we're on if (fGiveDebuggerAndProfilerNotification) EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFunctionLeave(pFunc); return SWA_ABORT; } if ( (offs < EHClause.TryStartPC) || (offs > EHClause.TryEndPC) || (offs == EHClause.TryStartPC && start_adjust) || (offs == EHClause.TryEndPC && end_adjust)) continue; // <TODO>@PERF : Is this too expensive? Consider storing the nesting level // instead of the HandlerEndPC.</TODO> // Determine the nesting level of EHClause. Just walk the table // again, and find out how many handlers enclose it DWORD nestingLevel = ComputeEnclosingHandlerNestingLevel(pJitManager, pCf->GetMethodToken(), EHClause.HandlerStartPC); // We just need *any* address within the method. This will let the debugger // resolve the EnC version of the method. PCODE pMethodAddr = GetControlPC(regs); UINT_PTR uStartAddress = (UINT_PTR)pCf->GetCodeInfo()->GetStartAddress(); if (IsFaultOrFinally(&EHClause)) { if (fGiveDebuggerAndProfilerNotification) EEToDebuggerExceptionInterfaceWrapper::ExceptionHandle(pFunc, pMethodAddr, EHClause.HandlerStartPC, pHandlerEBP); pExInfo->m_EHClauseInfo.SetInfo(COR_PRF_CLAUSE_FINALLY, uStartAddress + EHClause.HandlerStartPC, StackFrame((UINT_PTR)pHandlerEBP)); // Notify the profiler that we are about to execute the finally code if (fGiveDebuggerAndProfilerNotification) EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFinallyEnter(pFunc); LOG((LF_EH, LL_INFO100, "COMPlusUnwindCallback: finally clause [%d,%d] - call\n", EHClause.TryStartPC, EHClause.TryEndPC)); pExInfo->m_EHClauseInfo.SetManagedCodeEntered(TRUE); ::CallJitEHFinally(pCf, (BYTE *)uStartAddress, &EHClause, nestingLevel); pExInfo->m_EHClauseInfo.SetManagedCodeEntered(FALSE); LOG((LF_EH, LL_INFO100, "COMPlusUnwindCallback: finally - returned\n")); // Notify the profiler that we are done with the finally code if (fGiveDebuggerAndProfilerNotification) EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFinallyLeave(); pExInfo->m_EHClauseInfo.ResetInfo(); continue; } // Current is not a finally, check if it's the catching handler (or filter). 
if (pData->pFunc != pFunc || (ULONG)(pData->dHandler) != i || pData->pStack != pStack) { continue; } #ifdef _DEBUG gLastResumedExceptionFunc = pCf->GetFunction(); gLastResumedExceptionHandler = i; #endif // save clause information in the exinfo pExInfo->m_EHClauseInfo.SetInfo(COR_PRF_CLAUSE_CATCH, uStartAddress + EHClause.HandlerStartPC, StackFrame((UINT_PTR)pHandlerEBP)); // Notify the profiler that we are about to resume at the catcher. if (fGiveDebuggerAndProfilerNotification) { DACNotify::DoExceptionCatcherEnterNotification(pFunc, EHClause.HandlerStartPC); EEToProfilerExceptionInterfaceWrapper::ExceptionCatcherEnter(pThread, pFunc); EEToDebuggerExceptionInterfaceWrapper::ExceptionHandle(pFunc, pMethodAddr, EHClause.HandlerStartPC, pHandlerEBP); } STRESS_LOG4(LF_EH, LL_INFO100, "COMPlusUnwindCallback: offset 0x%x matches clause [0x%x, 0x%x) matches in method %pM\n", offs, EHClause.TryStartPC, EHClause.TryEndPC, pFunc); // ResumeAtJitEH will set pExInfo->m_EHClauseInfo.m_fManagedCodeEntered = TRUE; at the appropriate time ::ResumeAtJitEH(pCf, (BYTE *)uStartAddress, &EHClause, nestingLevel, pThread, pData->bUnwindStack); //UNREACHABLE_MSG("ResumeAtJitEH shouldn't have returned!"); // we do not set pExInfo->m_EHClauseInfo.m_fManagedCodeEntered = FALSE here, // that happens when the catch clause calls back to COMPlusEndCatch } STRESS_LOG1(LF_EH, LL_INFO100, "COMPlusUnwindCallback: no handler found in method %pM\n", pFunc); if (fGiveDebuggerAndProfilerNotification) EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFunctionLeave(pFunc); return SWA_CONTINUE; #ifdef DEBUGGING_SUPPORTED LDoDebuggerIntercept: STRESS_LOG1(LF_EH|LF_CORDB, LL_INFO100, "COMPlusUnwindCallback: Intercepting in method %pM\n", pFunc); // // Setup up the easy parts of the context to restart at. // EHContext context; // // Note: EAX ECX EDX are scratch // context.Esp = (DWORD)(size_t)(GetRegdisplaySP(regs)); context.Ebx = *regs->pEbx; context.Esi = *regs->pEsi; context.Edi = *regs->pEdi; context.Ebp = *regs->pEbp; // // Set scratch registers to 0 to avoid reporting incorrect values to GC in case of debugger changing the IP // in the middle of a scratch register lifetime (see Dev10 754922) // context.Eax = 0; context.Ecx = 0; context.Edx = 0; // // Ok, now set the target Eip to the address the debugger requested. // ULONG_PTR nativeOffset; pExInfo->m_DebuggerExState.GetDebuggerInterceptInfo(NULL, NULL, NULL, NULL, &nativeOffset, NULL); context.Eip = GetControlPC(regs) - (pCf->GetRelOffset() - nativeOffset); // // Finally we need to get the correct Esp for this nested level // context.Esp = pCf->GetCodeManager()->GetAmbientSP(regs, pCf->GetCodeInfo(), nativeOffset, pData->dHandler, pCf->GetCodeManState() ); // // In case we see unknown FS:[0] handlers we delay the interception point until we reach the handler that protects the interception point. // This way we have both FS:[0] handlers being poped up by RtlUnwind and managed capital F Frames being unwinded by managed stackwalker. // BOOL fCheckForUnknownHandler = TRUE; if (PopNestedExceptionRecords((LPVOID)(size_t)context.Esp, fCheckForUnknownHandler)) { // Let ClrDebuggerDoUnwindAndIntercept RtlUnwind continue to unwind frames until we reach the handler protected by COMPlusNestedExceptionHandler. 
pExInfo->m_InterceptionContext = context; pExInfo->m_ValidInterceptionContext = TRUE; STRESS_LOG0(LF_EH|LF_CORDB, LL_INFO100, "COMPlusUnwindCallback: Skip interception until unwinding reaches the actual handler protected by COMPlusNestedExceptionHandler\n"); } else { // // Pop off all the Exception information up to this point in the stack // UnwindExceptionTrackerAndResumeInInterceptionFrame(pExInfo, &context); } return SWA_ABORT; #endif // DEBUGGING_SUPPORTED } // StackWalkAction COMPlusUnwindCallback () #if defined(_MSC_VER) #pragma warning(pop) #endif #if defined(_MSC_VER) #pragma warning(push) #pragma warning (disable : 4740) // There is inline asm code in this function, which disables // global optimizations. #pragma warning (disable : 4731) #endif void ResumeAtJitEH(CrawlFrame* pCf, BYTE* startPC, EE_ILEXCEPTION_CLAUSE *EHClausePtr, DWORD nestingLevel, Thread *pThread, BOOL unwindStack) { // No dynamic contract here because this function doesn't return and destructors wouldn't be executed WRAPPER_NO_CONTRACT; EHContext context; context.Setup(PCODE(startPC + EHClausePtr->HandlerStartPC), pCf->GetRegisterSet()); size_t * pShadowSP = NULL; // Write Esp to *pShadowSP before jumping to handler size_t * pHandlerEnd = NULL; OBJECTREF throwable = PossiblyUnwrapThrowable(pThread->GetThrowable(), pCf->GetAssembly()); pCf->GetCodeManager()->FixContext(ICodeManager::CATCH_CONTEXT, &context, pCf->GetCodeInfo(), EHClausePtr->HandlerStartPC, nestingLevel, throwable, pCf->GetCodeManState(), &pShadowSP, &pHandlerEnd); if (pHandlerEnd) { *pHandlerEnd = EHClausePtr->HandlerEndPC; } MethodDesc* pMethodDesc = pCf->GetCodeInfo()->GetMethodDesc(); TADDR startAddress = pCf->GetCodeInfo()->GetStartAddress(); if (InlinedCallFrame::FrameHasActiveCall(pThread->m_pFrame)) { // When unwinding an exception in ReadyToRun, the JIT_PInvokeEnd helper which unlinks the ICF from // the thread will be skipped. This is because unlike jitted code, each pinvoke is wrapped by calls // to the JIT_PInvokeBegin and JIT_PInvokeEnd helpers, which push and pop the ICF on the thread. The // ICF is not linked at the method prolog and unlinked at the epilog when running R2R code. Since the // JIT_PInvokeEnd helper will be skipped, we need to unlink the ICF here. If the executing method // has another pinvoke, it will re-link the ICF again when the JIT_PInvokeBegin helper is called. // Check that the InlinedCallFrame is in the method with the exception handler. There can be other // InlinedCallFrame somewhere up the call chain that is not related to the current exception // handling. #ifdef DEBUG TADDR handlerFrameSP = pCf->GetRegisterSet()->SP; #endif // DEBUG // Find the ESP of the caller of the method with the exception handler. 
bool unwindSuccess = pCf->GetCodeManager()->UnwindStackFrame(pCf->GetRegisterSet(), pCf->GetCodeInfo(), pCf->GetCodeManagerFlags(), pCf->GetCodeManState(), NULL /* StackwalkCacheUnwindInfo* */); _ASSERTE(unwindSuccess); if (((TADDR)pThread->m_pFrame < pCf->GetRegisterSet()->SP) && ExecutionManager::IsReadyToRunCode(((InlinedCallFrame*)pThread->m_pFrame)->m_pCallerReturnAddress)) { _ASSERTE((TADDR)pThread->m_pFrame >= handlerFrameSP); pThread->m_pFrame->Pop(pThread); } } // save esp so that endcatch can restore it (it always restores, so want correct value) ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo); pExInfo->m_dEsp = (LPVOID)context.GetSP(); LOG((LF_EH, LL_INFO1000, "ResumeAtJitEH: current m_dEsp set to %p\n", context.GetSP())); PVOID dEsp = GetCurrentSP(); if (!unwindStack) { // If we don't want to unwind the stack, then the guard page had better not be gone! _ASSERTE(pThread->DetermineIfGuardPagePresent()); // so down below won't really update esp context.SetSP(dEsp); pExInfo->m_pShadowSP = pShadowSP; // so that endcatch can zero it back if (pShadowSP) { *pShadowSP = (size_t)dEsp; } } else { // so shadow SP has the real SP as we are going to unwind the stack dEsp = (LPVOID)context.GetSP(); // BEGIN: pExInfo->UnwindExInfo(dEsp); ExInfo *pPrevNestedInfo = pExInfo->m_pPrevNestedInfo; while (pPrevNestedInfo && pPrevNestedInfo->m_StackAddress < dEsp) { LOG((LF_EH, LL_INFO1000, "ResumeAtJitEH: popping nested ExInfo at 0x%p\n", pPrevNestedInfo->m_StackAddress)); pPrevNestedInfo->DestroyExceptionHandle(); pPrevNestedInfo->m_StackTraceInfo.FreeStackTrace(); #ifdef DEBUGGING_SUPPORTED if (g_pDebugInterface != NULL) { g_pDebugInterface->DeleteInterceptContext(pPrevNestedInfo->m_DebuggerExState.GetDebuggerInterceptContext()); } #endif // DEBUGGING_SUPPORTED pPrevNestedInfo = pPrevNestedInfo->m_pPrevNestedInfo; } pExInfo->m_pPrevNestedInfo = pPrevNestedInfo; _ASSERTE(pExInfo->m_pPrevNestedInfo == 0 || pExInfo->m_pPrevNestedInfo->m_StackAddress >= dEsp); // Before we unwind the SEH records, get the Frame from the top-most nested exception record. Frame* pNestedFrame = GetCurrFrame(FindNestedEstablisherFrame(GetCurrentSEHRecord())); PopNestedExceptionRecords((LPVOID)(size_t)dEsp); EXCEPTION_REGISTRATION_RECORD* pNewBottomMostHandler = GetCurrentSEHRecord(); pExInfo->m_pShadowSP = pShadowSP; // The context and exception record are no longer any good. _ASSERTE(pExInfo->m_pContext < dEsp); // It must be off the top of the stack. pExInfo->m_pContext = 0; // Whack it. pExInfo->m_pExceptionRecord = 0; pExInfo->m_pExceptionPointers = 0; // We're going to put one nested record back on the stack before we resume. This is // where it goes. NestedHandlerExRecord *pNestedHandlerExRecord = (NestedHandlerExRecord*)((BYTE*)dEsp - ALIGN_UP(sizeof(NestedHandlerExRecord), STACK_ALIGN_SIZE)); // The point of no return. The next statement starts scribbling on the stack. It's // deep enough that we won't hit our own locals. (That's important, 'cuz we're still // using them.) // _ASSERTE(dEsp > &pCf); pNestedHandlerExRecord->m_handlerInfo.m_hThrowable=NULL; // This is random memory. Handle // must be initialized to null before // calling Init(), as Init() will try // to free any old handle. pNestedHandlerExRecord->Init((PEXCEPTION_ROUTINE)COMPlusNestedExceptionHandler, pNestedFrame); INSTALL_EXCEPTION_HANDLING_RECORD(&(pNestedHandlerExRecord->m_ExReg)); context.SetSP(pNestedHandlerExRecord); // We might have moved the bottommost handler. 
The nested record itself is never // the bottom most handler -- it's pushed after the fact. So we have to make the // bottom-most handler the one BEFORE the nested record. if (pExInfo->m_pBottomMostHandler < pNewBottomMostHandler) { STRESS_LOG3(LF_EH, LL_INFO10000, "ResumeAtJitEH: setting ExInfo:0x%p m_pBottomMostHandler from 0x%p to 0x%p\n", pExInfo, pExInfo->m_pBottomMostHandler, pNewBottomMostHandler); pExInfo->m_pBottomMostHandler = pNewBottomMostHandler; } if (pShadowSP) { *pShadowSP = context.GetSP(); } } STRESS_LOG3(LF_EH, LL_INFO100, "ResumeAtJitEH: resuming at EIP = %p ESP = %p EBP = %p\n", context.Eip, context.GetSP(), context.GetFP()); #ifdef STACK_GUARDS_DEBUG // We are transitioning back to managed code, so ensure that we are in // SO-tolerant mode before we do so. RestoreSOToleranceState(); #endif // we want this to happen as late as possible but certainly after the notification // that the handle for the current ExInfo has been freed has been delivered pExInfo->m_EHClauseInfo.SetManagedCodeEntered(TRUE); ETW::ExceptionLog::ExceptionCatchBegin(pMethodDesc, (PVOID)startAddress); ResumeAtJitEHHelper(&context); UNREACHABLE_MSG("Should never return from ResumeAtJitEHHelper!"); // we do not set pExInfo->m_EHClauseInfo.m_fManagedCodeEntered = FALSE here, // that happens when the catch clause calls back to COMPlusEndCatch // we don't return to this point so it would be moot (see unreachable_msg above) } #if defined(_MSC_VER) #pragma warning(pop) #endif // Must be in a separate function because INSTALL_COMPLUS_EXCEPTION_HANDLER has a filter int CallJitEHFilterWorker(size_t *pShadowSP, EHContext *pContext) { STATIC_CONTRACT_THROWS; STATIC_CONTRACT_GC_TRIGGERS; STATIC_CONTRACT_MODE_COOPERATIVE; int retVal = EXCEPTION_CONTINUE_SEARCH; BEGIN_CALL_TO_MANAGED(); retVal = CallJitEHFilterHelper(pShadowSP, pContext); END_CALL_TO_MANAGED(); return retVal; } int CallJitEHFilter(CrawlFrame* pCf, BYTE* startPC, EE_ILEXCEPTION_CLAUSE *EHClausePtr, DWORD nestingLevel, OBJECTREF thrownObj) { STATIC_CONTRACT_THROWS; STATIC_CONTRACT_GC_TRIGGERS; STATIC_CONTRACT_MODE_COOPERATIVE; int retVal = EXCEPTION_CONTINUE_SEARCH; size_t * pShadowSP = NULL; EHContext context; context.Setup(PCODE(startPC + EHClausePtr->FilterOffset), pCf->GetRegisterSet()); size_t * pEndFilter = NULL; // Write pCf->GetCodeManager()->FixContext(ICodeManager::FILTER_CONTEXT, &context, pCf->GetCodeInfo(), EHClausePtr->FilterOffset, nestingLevel, thrownObj, pCf->GetCodeManState(), &pShadowSP, &pEndFilter); // End of the filter is the same as start of handler if (pEndFilter) { *pEndFilter = EHClausePtr->HandlerStartPC; } // ExceptionFilterFrame serves two purposes: // // 1. It serves as a frame that stops the managed search for handler // if we fault in the filter. ThrowCallbackType.pTopFrame is going point // to this frame during search for exception handler inside filter. // The search for handler needs a frame to stop. If we had no frame here, // the exceptions in filters would not be swallowed correctly since we would // walk past the EX_TRY/EX_CATCH block in COMPlusThrowCallbackHelper. // // 2. It allows setting of SHADOW_SP_FILTER_DONE flag in UnwindFrames() // if we fault in the filter. We have to set this flag together with unwinding // of the filter frame. Using a regular C++ holder to clear this flag here would cause // GC holes. The stack would be in inconsistent state when we trigger gc just before // returning from UnwindFrames. 
FrameWithCookie<ExceptionFilterFrame> exceptionFilterFrame(pShadowSP); ETW::ExceptionLog::ExceptionFilterBegin(pCf->GetCodeInfo()->GetMethodDesc(), (PVOID)pCf->GetCodeInfo()->GetStartAddress()); retVal = CallJitEHFilterWorker(pShadowSP, &context); ETW::ExceptionLog::ExceptionFilterEnd(); exceptionFilterFrame.Pop(); return retVal; } void CallJitEHFinally(CrawlFrame* pCf, BYTE* startPC, EE_ILEXCEPTION_CLAUSE *EHClausePtr, DWORD nestingLevel) { WRAPPER_NO_CONTRACT; EHContext context; context.Setup(PCODE(startPC + EHClausePtr->HandlerStartPC), pCf->GetRegisterSet()); size_t * pShadowSP = NULL; // Write Esp to *pShadowSP before jumping to handler size_t * pFinallyEnd = NULL; pCf->GetCodeManager()->FixContext( ICodeManager::FINALLY_CONTEXT, &context, pCf->GetCodeInfo(), EHClausePtr->HandlerStartPC, nestingLevel, ObjectToOBJECTREF((Object *) NULL), pCf->GetCodeManState(), &pShadowSP, &pFinallyEnd); if (pFinallyEnd) { *pFinallyEnd = EHClausePtr->HandlerEndPC; } ETW::ExceptionLog::ExceptionFinallyBegin(pCf->GetCodeInfo()->GetMethodDesc(), (PVOID)pCf->GetCodeInfo()->GetStartAddress()); CallJitEHFinallyHelper(pShadowSP, &context); ETW::ExceptionLog::ExceptionFinallyEnd(); // // Update the registers using new context // // This is necessary to reflect GC pointer changes during the middle of a unwind inside a // finally clause, because: // 1. GC won't see the part of stack inside try (which has thrown an exception) that is already // unwinded and thus GC won't update GC pointers for this portion of the stack, but rather the // call stack in finally. // 2. upon return of finally, the unwind process continues and unwinds stack based on the part // of stack inside try and won't see the updated values in finally. // As a result, we need to manually update the context using register values upon return of finally // // Note that we only update the registers for finally clause because // 1. For filter handlers, stack walker is able to see the whole stack (including the try part) // with the help of ExceptionFilterFrame as filter handlers are called in first pass // 2. For catch handlers, the current unwinding is already finished // context.UpdateFrame(pCf->GetRegisterSet()); // This does not need to be guarded by a holder because the frame is dead if an exception gets thrown. Filters are different // since they are run in the first pass, so we must update the shadowSP reset in CallJitEHFilter. if (pShadowSP) { *pShadowSP = 0; // reset the shadowSP to 0 } } #if defined(_MSC_VER) #pragma warning (default : 4731) #endif //===================================================================== // ********************************************************************* BOOL ComPlusFrameSEH(EXCEPTION_REGISTRATION_RECORD* pEHR) { LIMITED_METHOD_CONTRACT; return ((LPVOID)pEHR->Handler == (LPVOID)COMPlusFrameHandler || (LPVOID)pEHR->Handler == (LPVOID)COMPlusNestedExceptionHandler); } // //------------------------------------------------------------------------- // This is installed when we call COMPlusFrameHandler to provide a bound to // determine when are within a nested exception //------------------------------------------------------------------------- EXCEPTION_HANDLER_IMPL(COMPlusNestedExceptionHandler) { WRAPPER_NO_CONTRACT; if (pExceptionRecord->ExceptionFlags & (EXCEPTION_UNWINDING | EXCEPTION_EXIT_UNWIND)) { LOG((LF_EH, LL_INFO100, " COMPlusNestedHandler(unwind) with %x at %x\n", pExceptionRecord->ExceptionCode, pContext ? 
GetIP(pContext) : 0)); // We're unwinding past a nested exception record, which means that we've thrown // a new exception out of a region in which we're handling a previous one. The // previous exception is overridden -- and needs to be unwound. // The preceding is ALMOST true. There is one more case, where we use setjmp/longjmp // from withing a nested handler. We won't have a nested exception in that case -- just // the unwind. Thread* pThread = GetThread(); ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo); ExInfo* pPrevNestedInfo = pExInfo->m_pPrevNestedInfo; if (pPrevNestedInfo == &((NestedHandlerExRecord*)pEstablisherFrame)->m_handlerInfo) { _ASSERTE(pPrevNestedInfo); LOG((LF_EH, LL_INFO100, "COMPlusNestedExceptionHandler: PopExInfo(): popping nested ExInfo at 0x%p\n", pPrevNestedInfo)); pPrevNestedInfo->DestroyExceptionHandle(); pPrevNestedInfo->m_StackTraceInfo.FreeStackTrace(); #ifdef DEBUGGING_SUPPORTED if (g_pDebugInterface != NULL) { g_pDebugInterface->DeleteInterceptContext(pPrevNestedInfo->m_DebuggerExState.GetDebuggerInterceptContext()); } #endif // DEBUGGING_SUPPORTED pExInfo->m_pPrevNestedInfo = pPrevNestedInfo->m_pPrevNestedInfo; } else { // The whacky setjmp/longjmp case. Nothing to do. } } else { LOG((LF_EH, LL_INFO100, " InCOMPlusNestedHandler with %x at %x\n", pExceptionRecord->ExceptionCode, pContext ? GetIP(pContext) : 0)); } // There is a nasty "gotcha" in the way exception unwinding, finally's, and nested exceptions // interact. Here's the scenario ... it involves two exceptions, one normal one, and one // raised in a finally. // // The first exception occurs, and is caught by some handler way up the stack. That handler // calls RtlUnwind -- and handlers that didn't catch this first exception are called again, with // the UNWIND flag set. If, one of the handlers throws an exception during // unwind (like, a throw from a finally) -- then that same handler is not called during // the unwind pass of the second exception. [ASIDE: It is called on first-pass.] // // What that means is -- the COMPlusExceptionHandler, can't count on unwinding itself correctly // if an exception is thrown from a finally. Instead, it relies on the NestedExceptionHandler // that it pushes for this. // EXCEPTION_DISPOSITION retval = EXCEPTION_HANDLER_FWD(COMPlusFrameHandler); LOG((LF_EH, LL_INFO100, "Leaving COMPlusNestedExceptionHandler with %d\n", retval)); return retval; } EXCEPTION_REGISTRATION_RECORD *FindNestedEstablisherFrame(EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame) { LIMITED_METHOD_CONTRACT; while (pEstablisherFrame->Handler != (PEXCEPTION_ROUTINE)COMPlusNestedExceptionHandler) { pEstablisherFrame = pEstablisherFrame->Next; _ASSERTE(pEstablisherFrame != EXCEPTION_CHAIN_END); // should always find one } return pEstablisherFrame; } EXCEPTION_HANDLER_IMPL(FastNExportExceptHandler) { WRAPPER_NO_CONTRACT; // Most of our logic is in commin with COMPlusFrameHandler. EXCEPTION_DISPOSITION retval = EXCEPTION_HANDLER_FWD(COMPlusFrameHandler); #ifdef _DEBUG // If the exception is escaping the last CLR personality routine on the stack, // then state a flag on the thread to indicate so. 
if (retval == ExceptionContinueSearch) { SetReversePInvokeEscapingUnhandledExceptionStatus(IS_UNWINDING(pExceptionRecord->ExceptionFlags), pEstablisherFrame); } #endif // _DEBUG return retval; } #ifdef FEATURE_COMINTEROP // The reverse COM interop path needs to be sure to pop the ComMethodFrame that is pushed, but we do not want // to have an additional FS:0 handler between the COM callsite and the call into managed. So we push this // FS:0 handler, which will defer to the usual COMPlusFrameHandler and then perform the cleanup of the // ComMethodFrame, if needed. EXCEPTION_HANDLER_IMPL(COMPlusFrameHandlerRevCom) { STATIC_CONTRACT_THROWS; STATIC_CONTRACT_GC_TRIGGERS; STATIC_CONTRACT_MODE_ANY; // Defer to COMPlusFrameHandler EXCEPTION_DISPOSITION result = EXCEPTION_HANDLER_FWD(COMPlusFrameHandler); if (pExceptionRecord->ExceptionFlags & (EXCEPTION_UNWINDING | EXCEPTION_EXIT_UNWIND)) { // Do cleanup as needed ComMethodFrame::DoSecondPassHandlerCleanup(GetCurrFrame(pEstablisherFrame)); } return result; } #endif // FEATURE_COMINTEROP #endif // !DACCESS_COMPILE #endif // !FEATURE_EH_FUNCLETS PTR_CONTEXT GetCONTEXTFromRedirectedStubStackFrame(CONTEXT * pContext) { LIMITED_METHOD_DAC_CONTRACT; UINT_PTR stackSlot = pContext->Ebp + REDIRECTSTUB_EBP_OFFSET_CONTEXT; PTR_PTR_CONTEXT ppContext = dac_cast<PTR_PTR_CONTEXT>((TADDR)stackSlot); return *ppContext; } #ifndef DACCESS_COMPILE LONG CLRNoCatchHandler(EXCEPTION_POINTERS* pExceptionInfo, PVOID pv) { #ifndef FEATURE_EH_FUNCLETS WRAPPER_NO_CONTRACT; STATIC_CONTRACT_ENTRY_POINT; LONG result = EXCEPTION_CONTINUE_SEARCH; // This function can be called during the handling of a SO //BEGIN_ENTRYPOINT_VOIDRET; result = CLRVectoredExceptionHandler(pExceptionInfo); if (EXCEPTION_EXECUTE_HANDLER == result) { result = EXCEPTION_CONTINUE_SEARCH; } //END_ENTRYPOINT_VOIDRET; return result; #else // !FEATURE_EH_FUNCLETS return EXCEPTION_CONTINUE_SEARCH; #endif // !FEATURE_EH_FUNCLETS } // Returns TRUE if caller should resume execution. BOOL AdjustContextForVirtualStub( EXCEPTION_RECORD *pExceptionRecord, CONTEXT *pContext) { LIMITED_METHOD_CONTRACT; Thread * pThread = GetThreadNULLOk(); // We may not have a managed thread object. Example is an AV on the helper thread. // (perhaps during StubManager::IsStub) if (pThread == NULL) { return FALSE; } PCODE f_IP = GetIP(pContext); VirtualCallStubManager::StubKind sk; VirtualCallStubManager *pMgr = VirtualCallStubManager::FindStubManager(f_IP, &sk); if (sk == VirtualCallStubManager::SK_DISPATCH) { if (*PTR_WORD(f_IP) != X86_INSTR_CMP_IND_ECX_IMM32) { _ASSERTE(!"AV in DispatchStub at unknown instruction"); return FALSE; } } else if (sk == VirtualCallStubManager::SK_RESOLVE) { if (*PTR_WORD(f_IP) != X86_INSTR_MOV_EAX_ECX_IND) { _ASSERTE(!"AV in ResolveStub at unknown instruction"); return FALSE; } SetSP(pContext, dac_cast<PCODE>(dac_cast<PTR_BYTE>(GetSP(pContext)) + sizeof(void*))); // rollback push eax } else { return FALSE; } PCODE callsite = *dac_cast<PTR_PCODE>(GetSP(pContext)); if (pExceptionRecord != NULL) { pExceptionRecord->ExceptionAddress = (PVOID)callsite; } SetIP(pContext, callsite); #if defined(GCCOVER_TOLERATE_SPURIOUS_AV) // Modify LastAVAddress saved in thread to distinguish between fake & real AV // See comments in IsGcMarker in file excep.cpp for more details pThread->SetLastAVAddress((LPVOID)GetIP(pContext)); #endif // defined(GCCOVER_TOLERATE_SPURIOUS_AV) // put ESP back to what it was before the call. 
TADDR sp = GetSP(pContext) + sizeof(void*); #ifndef UNIX_X86_ABI // set the ESP to what it would be after the call (remove pushed arguments) size_t stackArgumentsSize; if (sk == VirtualCallStubManager::SK_DISPATCH) { ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE(); DispatchHolder *holder = DispatchHolder::FromDispatchEntry(f_IP); MethodTable *pMT = (MethodTable*)holder->stub()->expectedMT(); DispatchToken token(VirtualCallStubManager::GetTokenFromStubQuick(pMgr, f_IP, sk)); MethodDesc* pMD = VirtualCallStubManager::GetRepresentativeMethodDescFromToken(token, pMT); stackArgumentsSize = pMD->SizeOfArgStack(); } else { // Compute the stub entry address from the address of failure (location of dereferencing of "this" pointer) ResolveHolder *holder = ResolveHolder::FromResolveEntry(f_IP - ResolveStub::offsetOfThisDeref()); stackArgumentsSize = holder->stub()->stackArgumentsSize(); } sp += stackArgumentsSize; #endif // UNIX_X86_ABI SetSP(pContext, dac_cast<PCODE>(dac_cast<PTR_BYTE>(sp))); return TRUE; } #endif // !DACCESS_COMPILE
-1
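The excep.cpp content above centers on COMPlusUnwindCallback deciding whether the current native offset lies inside a try region, with the boundaries adjusted differently for call sites and CPU faults. The following is a minimal standalone sketch of that boundary test only; it is not runtime code, and the names ClauseRange and OffsetIsInsideTry are invented for illustration.

```cpp
#include <cstdint>

// Sketch only: the try-range membership test performed by the unwind callback.
// On a CPU fault the saved IP points at the faulting instruction; on a call it
// points at the next instruction. So the start offset is excluded for call
// sites (the call merely precedes the try block) and the end offset is
// excluded for faults in the active frame.
struct ClauseRange            // invented name, not an EE type
{
    uint32_t tryStartPC;      // native offset where the try region begins
    uint32_t tryEndPC;        // native offset just past the try region
};

// startAdjust: true when the frame neither faulted nor had its IP adjusted
// endAdjust:   true for the active (leaf) frame
bool OffsetIsInsideTry(const ClauseRange& clause, uint32_t offs,
                       bool startAdjust, bool endAdjust)
{
    if (offs < clause.tryStartPC || offs > clause.tryEndPC)
        return false;
    if (offs == clause.tryStartPC && startAdjust)
        return false;         // return address of a call just before the try block
    if (offs == clause.tryEndPC && endAdjust)
        return false;         // boundary fault lies outside the protected region
    return true;
}
```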
dotnet/runtime
66,474
Update repo CMake configuration to target /W4 by default
Contributes to https://github.com/dotnet/runtime/issues/66154. Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
AaronRobinsonMSFT
2022-03-10T20:20:31Z
2022-03-11T04:59:21Z
ba5207b0f4054fe8963fc3e87c2bd32907516961
5ce2b9f860f7a84c3059650bb67817d59d8f4953
Update repo CMake configuration to target /W4 by default. Contributes to https://github.com/dotnet/runtime/issues/66154. Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
./src/coreclr/gc/softwarewritewatch.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "common.h" #include "gcenv.h" #include "env/gcenv.os.h" #include "softwarewritewatch.h" #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP #ifndef DACCESS_COMPILE static_assert((static_cast<size_t>(1) << SOFTWARE_WRITE_WATCH_AddressToTableByteIndexShift) == WRITE_WATCH_UNIT_SIZE, "Unexpected WRITE_WATCH_UNIT_SIZE"); extern "C" { uint8_t *g_gc_sw_ww_table = nullptr; bool g_gc_sw_ww_enabled_for_gc_heap = false; } void SoftwareWriteWatch::StaticClose() { if (GetTable() == nullptr) { return; } g_gc_sw_ww_enabled_for_gc_heap = false; g_gc_sw_ww_table = nullptr; } bool SoftwareWriteWatch::GetDirtyFromBlock( uint8_t *block, uint8_t *firstPageAddressInBlock, size_t startByteIndex, size_t endByteIndex, void **dirtyPages, size_t *dirtyPageIndexRef, size_t dirtyPageCount, bool clearDirty) { assert(block != nullptr); assert(ALIGN_DOWN(block, sizeof(size_t)) == block); assert(firstPageAddressInBlock == reinterpret_cast<uint8_t *>(GetPageAddress(block - GetTable()))); assert(startByteIndex < endByteIndex); assert(endByteIndex <= sizeof(size_t)); assert(dirtyPages != nullptr); assert(dirtyPageIndexRef != nullptr); size_t &dirtyPageIndex = *dirtyPageIndexRef; assert(dirtyPageIndex < dirtyPageCount); size_t dirtyBytes = *reinterpret_cast<size_t *>(block); if (dirtyBytes == 0) { return true; } if (startByteIndex != 0) { size_t numLowBitsToClear = startByteIndex * 8; dirtyBytes >>= numLowBitsToClear; dirtyBytes <<= numLowBitsToClear; } if (endByteIndex != sizeof(size_t)) { size_t numHighBitsToClear = (sizeof(size_t) - endByteIndex) * 8; dirtyBytes <<= numHighBitsToClear; dirtyBytes >>= numHighBitsToClear; } while (dirtyBytes != 0) { DWORD bitIndex; static_assert(sizeof(size_t) <= 8, "Unexpected sizeof(size_t)"); if (sizeof(size_t) == 8) { BitScanForward64(&bitIndex, static_cast<DWORD64>(dirtyBytes)); } else { BitScanForward(&bitIndex, static_cast<DWORD>(dirtyBytes)); } // Each byte is only ever set to 0 or 0xff assert(bitIndex % 8 == 0); size_t byteMask = static_cast<size_t>(0xff) << bitIndex; assert((dirtyBytes & byteMask) == byteMask); dirtyBytes ^= byteMask; DWORD byteIndex = bitIndex / 8; if (clearDirty) { // Clear only the bytes for which pages are recorded as dirty block[byteIndex] = 0; } void *pageAddress = firstPageAddressInBlock + byteIndex * WRITE_WATCH_UNIT_SIZE; assert(pageAddress >= GetHeapStartAddress()); assert(pageAddress < GetHeapEndAddress()); assert(dirtyPageIndex < dirtyPageCount); dirtyPages[dirtyPageIndex] = pageAddress; ++dirtyPageIndex; if (dirtyPageIndex == dirtyPageCount) { return false; } } return true; } void SoftwareWriteWatch::GetDirty( void *baseAddress, size_t regionByteSize, void **dirtyPages, size_t *dirtyPageCountRef, bool clearDirty, bool isRuntimeSuspended) { VerifyCreated(); VerifyMemoryRegion(baseAddress, regionByteSize); assert(dirtyPages != nullptr); assert(dirtyPageCountRef != nullptr); size_t dirtyPageCount = *dirtyPageCountRef; if (dirtyPageCount == 0) { return; } if (!isRuntimeSuspended) { // When a page is marked as dirty, a memory barrier is not issued after the write most of the time. Issue a memory // barrier on all active threads of the process now to make recent changes to dirty state visible to this thread. 
GCToOSInterface::FlushProcessWriteBuffers(); } uint8_t *tableRegionStart; size_t tableRegionByteSize; TranslateToTableRegion(baseAddress, regionByteSize, &tableRegionStart, &tableRegionByteSize); uint8_t *tableRegionEnd = tableRegionStart + tableRegionByteSize; uint8_t *blockStart = ALIGN_DOWN(tableRegionStart, sizeof(size_t)); assert(blockStart >= GetUntranslatedTable()); uint8_t *blockEnd = ALIGN_UP(tableRegionEnd, sizeof(size_t)); assert(blockEnd <= GetUntranslatedTableEnd()); uint8_t *fullBlockEnd = ALIGN_DOWN(tableRegionEnd, sizeof(size_t)); size_t dirtyPageIndex = 0; uint8_t *currentBlock = blockStart; uint8_t *firstPageAddressInCurrentBlock = reinterpret_cast<uint8_t *>(GetPageAddress(currentBlock - GetTable())); do { if (blockStart == fullBlockEnd) { if (GetDirtyFromBlock( currentBlock, firstPageAddressInCurrentBlock, tableRegionStart - blockStart, tableRegionEnd - fullBlockEnd, dirtyPages, &dirtyPageIndex, dirtyPageCount, clearDirty)) { *dirtyPageCountRef = dirtyPageIndex; } break; } if (tableRegionStart != blockStart) { if (!GetDirtyFromBlock( currentBlock, firstPageAddressInCurrentBlock, tableRegionStart - blockStart, sizeof(size_t), dirtyPages, &dirtyPageIndex, dirtyPageCount, clearDirty)) { break; } currentBlock += sizeof(size_t); firstPageAddressInCurrentBlock += sizeof(size_t) * WRITE_WATCH_UNIT_SIZE; } while (currentBlock < fullBlockEnd) { if (!GetDirtyFromBlock( currentBlock, firstPageAddressInCurrentBlock, 0, sizeof(size_t), dirtyPages, &dirtyPageIndex, dirtyPageCount, clearDirty)) { break; } currentBlock += sizeof(size_t); firstPageAddressInCurrentBlock += sizeof(size_t) * WRITE_WATCH_UNIT_SIZE; } if (currentBlock < fullBlockEnd) { break; } if (tableRegionEnd != fullBlockEnd && !GetDirtyFromBlock( currentBlock, firstPageAddressInCurrentBlock, 0, tableRegionEnd - fullBlockEnd, dirtyPages, &dirtyPageIndex, dirtyPageCount, clearDirty)) { break; } *dirtyPageCountRef = dirtyPageIndex; } while (false); if (!isRuntimeSuspended && clearDirty && dirtyPageIndex != 0) { // When dirtying a page, the dirty state of the page is first checked to see if the page is already dirty. If already // dirty, the write to mark it as dirty is skipped. So, when the dirty state of a page is cleared, we need to make sure // the cleared state is visible to other threads that may dirty the page, before marking through objects in the page, so // that the GC will not miss marking through dirtied objects in the page. Issue a memory barrier on all active threads // of the process now. MemoryBarrier(); // flush writes from this thread first to guarantee ordering GCToOSInterface::FlushProcessWriteBuffers(); } } #endif // !DACCESS_COMPILE #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "common.h" #include "gcenv.h" #include "env/gcenv.os.h" #include "softwarewritewatch.h" #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP #ifndef DACCESS_COMPILE static_assert((static_cast<size_t>(1) << SOFTWARE_WRITE_WATCH_AddressToTableByteIndexShift) == WRITE_WATCH_UNIT_SIZE, "Unexpected WRITE_WATCH_UNIT_SIZE"); extern "C" { uint8_t *g_gc_sw_ww_table = nullptr; bool g_gc_sw_ww_enabled_for_gc_heap = false; } void SoftwareWriteWatch::StaticClose() { if (GetTable() == nullptr) { return; } g_gc_sw_ww_enabled_for_gc_heap = false; g_gc_sw_ww_table = nullptr; } bool SoftwareWriteWatch::GetDirtyFromBlock( uint8_t *block, uint8_t *firstPageAddressInBlock, size_t startByteIndex, size_t endByteIndex, void **dirtyPages, size_t *dirtyPageIndexRef, size_t dirtyPageCount, bool clearDirty) { assert(block != nullptr); assert(ALIGN_DOWN(block, sizeof(size_t)) == block); assert(firstPageAddressInBlock == reinterpret_cast<uint8_t *>(GetPageAddress(block - GetTable()))); assert(startByteIndex < endByteIndex); assert(endByteIndex <= sizeof(size_t)); assert(dirtyPages != nullptr); assert(dirtyPageIndexRef != nullptr); size_t &dirtyPageIndex = *dirtyPageIndexRef; assert(dirtyPageIndex < dirtyPageCount); size_t dirtyBytes = *reinterpret_cast<size_t *>(block); if (dirtyBytes == 0) { return true; } if (startByteIndex != 0) { size_t numLowBitsToClear = startByteIndex * 8; dirtyBytes >>= numLowBitsToClear; dirtyBytes <<= numLowBitsToClear; } if (endByteIndex != sizeof(size_t)) { size_t numHighBitsToClear = (sizeof(size_t) - endByteIndex) * 8; dirtyBytes <<= numHighBitsToClear; dirtyBytes >>= numHighBitsToClear; } while (dirtyBytes != 0) { DWORD bitIndex; static_assert(sizeof(size_t) <= 8, "Unexpected sizeof(size_t)"); if (sizeof(size_t) == 8) { BitScanForward64(&bitIndex, static_cast<DWORD64>(dirtyBytes)); } else { BitScanForward(&bitIndex, static_cast<DWORD>(dirtyBytes)); } // Each byte is only ever set to 0 or 0xff assert(bitIndex % 8 == 0); size_t byteMask = static_cast<size_t>(0xff) << bitIndex; assert((dirtyBytes & byteMask) == byteMask); dirtyBytes ^= byteMask; DWORD byteIndex = bitIndex / 8; if (clearDirty) { // Clear only the bytes for which pages are recorded as dirty block[byteIndex] = 0; } void *pageAddress = firstPageAddressInBlock + byteIndex * WRITE_WATCH_UNIT_SIZE; assert(pageAddress >= GetHeapStartAddress()); assert(pageAddress < GetHeapEndAddress()); assert(dirtyPageIndex < dirtyPageCount); dirtyPages[dirtyPageIndex] = pageAddress; ++dirtyPageIndex; if (dirtyPageIndex == dirtyPageCount) { return false; } } return true; } void SoftwareWriteWatch::GetDirty( void *baseAddress, size_t regionByteSize, void **dirtyPages, size_t *dirtyPageCountRef, bool clearDirty, bool isRuntimeSuspended) { VerifyCreated(); VerifyMemoryRegion(baseAddress, regionByteSize); assert(dirtyPages != nullptr); assert(dirtyPageCountRef != nullptr); size_t dirtyPageCount = *dirtyPageCountRef; if (dirtyPageCount == 0) { return; } if (!isRuntimeSuspended) { // When a page is marked as dirty, a memory barrier is not issued after the write most of the time. Issue a memory // barrier on all active threads of the process now to make recent changes to dirty state visible to this thread. 
GCToOSInterface::FlushProcessWriteBuffers(); } uint8_t *tableRegionStart; size_t tableRegionByteSize; TranslateToTableRegion(baseAddress, regionByteSize, &tableRegionStart, &tableRegionByteSize); uint8_t *tableRegionEnd = tableRegionStart + tableRegionByteSize; uint8_t *blockStart = ALIGN_DOWN(tableRegionStart, sizeof(size_t)); assert(blockStart >= GetUntranslatedTable()); uint8_t *blockEnd = ALIGN_UP(tableRegionEnd, sizeof(size_t)); assert(blockEnd <= GetUntranslatedTableEnd()); uint8_t *fullBlockEnd = ALIGN_DOWN(tableRegionEnd, sizeof(size_t)); size_t dirtyPageIndex = 0; uint8_t *currentBlock = blockStart; uint8_t *firstPageAddressInCurrentBlock = reinterpret_cast<uint8_t *>(GetPageAddress(currentBlock - GetTable())); do { if (blockStart == fullBlockEnd) { if (GetDirtyFromBlock( currentBlock, firstPageAddressInCurrentBlock, tableRegionStart - blockStart, tableRegionEnd - fullBlockEnd, dirtyPages, &dirtyPageIndex, dirtyPageCount, clearDirty)) { *dirtyPageCountRef = dirtyPageIndex; } break; } if (tableRegionStart != blockStart) { if (!GetDirtyFromBlock( currentBlock, firstPageAddressInCurrentBlock, tableRegionStart - blockStart, sizeof(size_t), dirtyPages, &dirtyPageIndex, dirtyPageCount, clearDirty)) { break; } currentBlock += sizeof(size_t); firstPageAddressInCurrentBlock += sizeof(size_t) * WRITE_WATCH_UNIT_SIZE; } while (currentBlock < fullBlockEnd) { if (!GetDirtyFromBlock( currentBlock, firstPageAddressInCurrentBlock, 0, sizeof(size_t), dirtyPages, &dirtyPageIndex, dirtyPageCount, clearDirty)) { break; } currentBlock += sizeof(size_t); firstPageAddressInCurrentBlock += sizeof(size_t) * WRITE_WATCH_UNIT_SIZE; } if (currentBlock < fullBlockEnd) { break; } if (tableRegionEnd != fullBlockEnd && !GetDirtyFromBlock( currentBlock, firstPageAddressInCurrentBlock, 0, tableRegionEnd - fullBlockEnd, dirtyPages, &dirtyPageIndex, dirtyPageCount, clearDirty)) { break; } *dirtyPageCountRef = dirtyPageIndex; } while (false); if (!isRuntimeSuspended && clearDirty && dirtyPageIndex != 0) { // When dirtying a page, the dirty state of the page is first checked to see if the page is already dirty. If already // dirty, the write to mark it as dirty is skipped. So, when the dirty state of a page is cleared, we need to make sure // the cleared state is visible to other threads that may dirty the page, before marking through objects in the page, so // that the GC will not miss marking through dirtied objects in the page. Issue a memory barrier on all active threads // of the process now. MemoryBarrier(); // flush writes from this thread first to guarantee ordering GCToOSInterface::FlushProcessWriteBuffers(); } } #endif // !DACCESS_COMPILE #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
-1
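The softwarewritewatch.cpp content above walks the write-watch table one size_t block at a time, trimming partial blocks with shifts and locating dirty bytes with BitScanForward/BitScanForward64. Below is a hedged sketch of the same masking and byte-extraction idea in portable form; it assumes C++20 for std::countr_zero, and DirtyByteIndices is an invented name rather than anything in the GC.

```cpp
#include <bit>        // std::countr_zero (C++20)
#include <cstdint>
#include <cstddef>
#include <vector>

// Sketch only (not the GC implementation): given one size_t-sized block of the
// write-watch table, where each byte is 0x00 (clean) or 0xff (dirty) and stands
// for one page, return the indices of the dirty bytes in [startByte, endByte).
// Preconditions mirror GetDirtyFromBlock: 0 <= startByte < endByte <= 8.
std::vector<std::size_t> DirtyByteIndices(std::uint64_t block,
                                          std::size_t startByte,
                                          std::size_t endByte)
{
    // Trim bytes outside the requested sub-range, as the partial first and
    // last blocks of a region are trimmed with shifts in the code above.
    if (startByte != 0)
    {
        block >>= startByte * 8;
        block <<= startByte * 8;
    }
    if (endByte != sizeof(std::uint64_t))
    {
        const std::size_t high = (sizeof(std::uint64_t) - endByte) * 8;
        block <<= high;
        block >>= high;
    }

    std::vector<std::size_t> dirty;
    while (block != 0)
    {
        const int bit = std::countr_zero(block);          // lowest set bit
        const std::size_t byteIndex = static_cast<std::size_t>(bit) / 8;
        dirty.push_back(byteIndex);
        block &= ~(UINT64_C(0xff) << (byteIndex * 8));    // clear that byte
    }
    return dirty;
}
```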
dotnet/runtime
66,474
Update repo CMake configuration to target /W4 by default
Contributes to https://github.com/dotnet/runtime/issues/66154. Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
AaronRobinsonMSFT
2022-03-10T20:20:31Z
2022-03-11T04:59:21Z
ba5207b0f4054fe8963fc3e87c2bd32907516961
5ce2b9f860f7a84c3059650bb67817d59d8f4953
Update repo CMake configuration to target /W4 by default. Contributes to https://github.com/dotnet/runtime/issues/66154. Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
./src/coreclr/pal/tests/palsuite/c_runtime/vprintf/test18/test18.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================================ ** ** Source: test18.c ** ** Purpose: Test #18 for the vprintf function. Tests the uppercase ** shorthand notation double specifier (%G) ** ** **==========================================================================*/ #include <palsuite.h> #include "../vprintf.h" PALTEST(c_runtime_vprintf_test18_paltest_vprintf_test18, "c_runtime/vprintf/test18/paltest_vprintf_test18") { double val = 2560.001; double neg = -2560.001; if (PAL_Initialize(argc, argv)) { return FAIL; } DoDoubleTest("foo %G", val, "foo 2560", "foo 2560"); DoDoubleTest("foo %lG", val, "foo 2560", "foo 2560"); DoDoubleTest("foo %hG", val, "foo 2560", "foo 2560"); DoDoubleTest("foo %LG", val, "foo 2560", "foo 2560"); DoDoubleTest("foo %I64G", val, "foo 2560", "foo 2560"); DoDoubleTest("foo %5G", val, "foo 2560", "foo 2560"); DoDoubleTest("foo %-5G", val, "foo 2560 ", "foo 2560 "); DoDoubleTest("foo %.1G", val, "foo 3E+003", "foo 3E+03"); DoDoubleTest("foo %.2G", val, "foo 2.6E+003", "foo 2.6E+03"); DoDoubleTest("foo %.12G", val, "foo 2560.001", "foo 2560.001"); DoDoubleTest("foo %06G", val, "foo 002560", "foo 002560"); DoDoubleTest("foo %#G", val, "foo 2560.00", "foo 2560.00"); DoDoubleTest("foo %+G", val, "foo +2560", "foo +2560"); DoDoubleTest("foo % G", val, "foo 2560", "foo 2560"); DoDoubleTest("foo %+G", neg, "foo -2560", "foo -2560"); DoDoubleTest("foo % G", neg, "foo -2560", "foo -2560"); PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================================ ** ** Source: test18.c ** ** Purpose: Test #18 for the vprintf function. Tests the uppercase ** shorthand notation double specifier (%G) ** ** **==========================================================================*/ #include <palsuite.h> #include "../vprintf.h" PALTEST(c_runtime_vprintf_test18_paltest_vprintf_test18, "c_runtime/vprintf/test18/paltest_vprintf_test18") { double val = 2560.001; double neg = -2560.001; if (PAL_Initialize(argc, argv)) { return FAIL; } DoDoubleTest("foo %G", val, "foo 2560", "foo 2560"); DoDoubleTest("foo %lG", val, "foo 2560", "foo 2560"); DoDoubleTest("foo %hG", val, "foo 2560", "foo 2560"); DoDoubleTest("foo %LG", val, "foo 2560", "foo 2560"); DoDoubleTest("foo %I64G", val, "foo 2560", "foo 2560"); DoDoubleTest("foo %5G", val, "foo 2560", "foo 2560"); DoDoubleTest("foo %-5G", val, "foo 2560 ", "foo 2560 "); DoDoubleTest("foo %.1G", val, "foo 3E+003", "foo 3E+03"); DoDoubleTest("foo %.2G", val, "foo 2.6E+003", "foo 2.6E+03"); DoDoubleTest("foo %.12G", val, "foo 2560.001", "foo 2560.001"); DoDoubleTest("foo %06G", val, "foo 002560", "foo 002560"); DoDoubleTest("foo %#G", val, "foo 2560.00", "foo 2560.00"); DoDoubleTest("foo %+G", val, "foo +2560", "foo +2560"); DoDoubleTest("foo % G", val, "foo 2560", "foo 2560"); DoDoubleTest("foo %+G", neg, "foo -2560", "foo -2560"); DoDoubleTest("foo % G", neg, "foo -2560", "foo -2560"); PAL_Terminate(); return PASS; }
-1
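The vprintf test18 content above asserts specific outputs for the %G conversion. As a quick illustration of why those strings are expected, here is a small standalone program (not part of the PAL test suite) exercising the same formats with plain printf; the exact number of exponent digits varies between C runtimes, which is why the test supplies two acceptable strings per case.

```cpp
#include <cstdio>

// Standalone illustration of the %G behaviour the expected strings above
// encode: %G chooses %E or %f, drops trailing zeros unless '#' is given, and
// the precision counts significant digits. Exponent digit count is
// runtime dependent.
int main()
{
    const double val = 2560.001;
    std::printf("foo %G\n", val);    // foo 2560      (default precision 6)
    std::printf("foo %.1G\n", val);  // foo 3E+03     (one significant digit)
    std::printf("foo %.2G\n", val);  // foo 2.6E+03
    std::printf("foo %#G\n", val);   // foo 2560.00   ('#' keeps trailing zeros)
    std::printf("foo %+G\n", -val);  // foo -2560
    return 0;
}
```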
dotnet/runtime
66,474
Update repo CMake configuration to target /W4 by default
Contributes to https://github.com/dotnet/runtime/issues/66154. Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
AaronRobinsonMSFT
2022-03-10T20:20:31Z
2022-03-11T04:59:21Z
ba5207b0f4054fe8963fc3e87c2bd32907516961
5ce2b9f860f7a84c3059650bb67817d59d8f4953
Update repo CMake configuration to target /W4 by default. Contributes to https://github.com/dotnet/runtime/issues/66154. Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
./src/coreclr/pal/tests/palsuite/locale_info/WideCharToMultiByte/test3/test3.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================================ ** ** Source: test3.c ** ** Purpose: Tests that WideCharToMultiByte correctly handles the following ** error conditions: insufficient buffer space, invalid code pages, ** and invalid flags. ** ** **==========================================================================*/ #include <palsuite.h> PALTEST(locale_info_WideCharToMultiByte_test3_paltest_widechartomultibyte_test3, "locale_info/WideCharToMultiByte/test3/paltest_widechartomultibyte_test3") { char mbStr[128]; WCHAR wideStr[128]; int ret; int i; int k; BOOL bRet=TRUE; /* These codepages are currently supported by the PAL */ int codePages[] ={ CP_ACP, CP_UTF8 }; if (PAL_Initialize(argc, argv)) { return FAIL; } /* Go through all of the code pages */ for(i=0; i<(sizeof(codePages)/sizeof(int)); i++) { for (k=0; k<128; k++) { wideStr[k] = 'a'; mbStr[k] = 0; } wideStr[127] = 0; /* try with insufficient buffer space */ ret = WideCharToMultiByte(codePages[i], 0, wideStr, -1, mbStr, 10, NULL, NULL); if (ret != 0) { Trace("WideCharToMultiByte did not return an error!\n" "Expected return of 0, got %d for code page %d.\n", ret, codePages[i]); bRet = FALSE; } ret = GetLastError(); if (ret != ERROR_INSUFFICIENT_BUFFER) { Fail("WideCharToMultiByte set the last error to %u instead of " "ERROR_INSUFFICIENT_BUFFER for code page %d.\n", GetLastError(),codePages[i]); bRet = FALSE; } } /* Return failure if any of the code pages returned the wrong results */ if(!bRet) { return FAIL; } /* try with a wacky code page */ ret = WideCharToMultiByte(-1, 0, wideStr, -1, mbStr, 128, NULL, NULL); if (ret != 0) { Fail("WideCharToMultiByte did not return an error!\n" "Expected return of 0, got %d for invalid code page.\n", ret); } ret = GetLastError(); if (ret != ERROR_INVALID_PARAMETER) { Fail("WideCharToMultiByte set the last error to %u instead of " "ERROR_INVALID_PARAMETER for invalid code page -1.\n", GetLastError()); } PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================================ ** ** Source: test3.c ** ** Purpose: Tests that WideCharToMultiByte correctly handles the following ** error conditions: insufficient buffer space, invalid code pages, ** and invalid flags. ** ** **==========================================================================*/ #include <palsuite.h> PALTEST(locale_info_WideCharToMultiByte_test3_paltest_widechartomultibyte_test3, "locale_info/WideCharToMultiByte/test3/paltest_widechartomultibyte_test3") { char mbStr[128]; WCHAR wideStr[128]; int ret; int i; int k; BOOL bRet=TRUE; /* These codepages are currently supported by the PAL */ int codePages[] ={ CP_ACP, CP_UTF8 }; if (PAL_Initialize(argc, argv)) { return FAIL; } /* Go through all of the code pages */ for(i=0; i<(sizeof(codePages)/sizeof(int)); i++) { for (k=0; k<128; k++) { wideStr[k] = 'a'; mbStr[k] = 0; } wideStr[127] = 0; /* try with insufficient buffer space */ ret = WideCharToMultiByte(codePages[i], 0, wideStr, -1, mbStr, 10, NULL, NULL); if (ret != 0) { Trace("WideCharToMultiByte did not return an error!\n" "Expected return of 0, got %d for code page %d.\n", ret, codePages[i]); bRet = FALSE; } ret = GetLastError(); if (ret != ERROR_INSUFFICIENT_BUFFER) { Fail("WideCharToMultiByte set the last error to %u instead of " "ERROR_INSUFFICIENT_BUFFER for code page %d.\n", GetLastError(),codePages[i]); bRet = FALSE; } } /* Return failure if any of the code pages returned the wrong results */ if(!bRet) { return FAIL; } /* try with a wacky code page */ ret = WideCharToMultiByte(-1, 0, wideStr, -1, mbStr, 128, NULL, NULL); if (ret != 0) { Fail("WideCharToMultiByte did not return an error!\n" "Expected return of 0, got %d for invalid code page.\n", ret); } ret = GetLastError(); if (ret != ERROR_INVALID_PARAMETER) { Fail("WideCharToMultiByte set the last error to %u instead of " "ERROR_INVALID_PARAMETER for invalid code page -1.\n", GetLastError()); } PAL_Terminate(); return PASS; }
-1
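The WideCharToMultiByte test3 content above provokes ERROR_INSUFFICIENT_BUFFER by passing a 10-byte output buffer. For context, the usual way callers avoid that error is the two-call pattern sketched below; ToUtf8 is an invented helper, and the sketch assumes the Win32 (or PAL) declaration of WideCharToMultiByte from windows.h.

```cpp
#include <windows.h>
#include <vector>

// Sketch only: the two-call pattern that avoids the ERROR_INSUFFICIENT_BUFFER
// case exercised by the test above. ToUtf8 first asks WideCharToMultiByte for
// the required size (cbMultiByte == 0), then converts into a buffer of exactly
// that size.
std::vector<char> ToUtf8(const WCHAR* wide)
{
    const int needed = WideCharToMultiByte(CP_UTF8, 0, wide, -1,
                                           nullptr, 0, nullptr, nullptr);
    if (needed <= 0)
        return {};   // conversion failed; GetLastError() has the reason

    std::vector<char> utf8(static_cast<size_t>(needed));
    const int written = WideCharToMultiByte(CP_UTF8, 0, wide, -1,
                                            utf8.data(), needed, nullptr, nullptr);
    if (written <= 0)
        return {};
    return utf8;     // includes the terminating '\0'
}
```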
dotnet/runtime
66,474
Update repo CMake configuration to target /W4 by default
Contributes to https://github.com/dotnet/runtime/issues/66154. Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
AaronRobinsonMSFT
2022-03-10T20:20:31Z
2022-03-11T04:59:21Z
ba5207b0f4054fe8963fc3e87c2bd32907516961
5ce2b9f860f7a84c3059650bb67817d59d8f4953
Update repo CMake configuration to target /W4 by default. Contributes to https://github.com/dotnet/runtime/issues/66154. Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
./src/coreclr/pal/tests/palsuite/c_runtime/isxdigit/test1/test1.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================================ ** ** Source: test1.c ** ** Purpose: Run through every possible character. For each time that ** isxdigit returns: ** 1, check through a list of the known hex characters to ensure that it ** is really a hex char. Also, when it returns 0, ensure that that character ** isn't a hex character. ** ** **==========================================================================*/ #include <palsuite.h> PALTEST(c_runtime_isxdigit_test1_paltest_isxdigit_test1, "c_runtime/isxdigit/test1/paltest_isxdigit_test1") { int i; /* Initialize the PAL */ if ( 0 != PAL_Initialize(argc, argv)) { return FAIL; } /* Loop through each character and call isxdigit for each character */ for (i=1; i<256; i++) { if (isxdigit(i) == 0) { if( ((i>=48) && (i<=57)) || ((i>=97) && (i<=102)) || ((i>=65) && (i<=70)) ) { Fail("ERROR: isxdigit() returns true for '%c' (%d)\n", i, i); } } else { if( ((i<48) && (i>58)) || ((i<97) && (i>102)) || ((i<65) && (i>70)) ) { Fail("ERROR: isxdigit() returns false for '%c' (%d)\n", i, i); } } } PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================================ ** ** Source: test1.c ** ** Purpose: Run through every possible character. For each time that ** isxdigit returns: ** 1, check through a list of the known hex characters to ensure that it ** is really a hex char. Also, when it returns 0, ensure that that character ** isn't a hex character. ** ** **==========================================================================*/ #include <palsuite.h> PALTEST(c_runtime_isxdigit_test1_paltest_isxdigit_test1, "c_runtime/isxdigit/test1/paltest_isxdigit_test1") { int i; /* Initialize the PAL */ if ( 0 != PAL_Initialize(argc, argv)) { return FAIL; } /* Loop through each character and call isxdigit for each character */ for (i=1; i<256; i++) { if (isxdigit(i) == 0) { if( ((i>=48) && (i<=57)) || ((i>=97) && (i<=102)) || ((i>=65) && (i<=70)) ) { Fail("ERROR: isxdigit() returns true for '%c' (%d)\n", i, i); } } else { if( ((i<48) && (i>58)) || ((i<97) && (i>102)) || ((i<65) && (i>70)) ) { Fail("ERROR: isxdigit() returns false for '%c' (%d)\n", i, i); } } } PAL_Terminate(); return PASS; }
-1
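The isxdigit test1 content above cross-checks isxdigit against the ASCII ranges for '0'-'9', 'a'-'f' and 'A'-'F'. A tiny sketch of that predicate, written out explicitly; IsHexDigitAscii is an invented name, and for characters in the unsigned-char range it agrees with isxdigit() in the "C" locale.

```cpp
// Sketch only: the ASCII ranges the test above compares isxdigit() against.
bool IsHexDigitAscii(int c)
{
    return (c >= '0' && c <= '9') ||   // 48..57
           (c >= 'a' && c <= 'f') ||   // 97..102
           (c >= 'A' && c <= 'F');     // 65..70
}
```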
dotnet/runtime
66,474
Update repo CMake configuration to target /W4 by default
Contributes to https://github.com/dotnet/runtime/issues/66154. Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
AaronRobinsonMSFT
2022-03-10T20:20:31Z
2022-03-11T04:59:21Z
ba5207b0f4054fe8963fc3e87c2bd32907516961
5ce2b9f860f7a84c3059650bb67817d59d8f4953
Update repo CMake configuration to target /W4 by default. Contributes to https://github.com/dotnet/runtime/issues/66154. Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
./src/coreclr/vm/customattribute.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "common.h" #include "customattribute.h" #include "invokeutil.h" #include "method.hpp" #include "threads.h" #include "excep.h" #include "corerror.h" #include "classnames.h" #include "fcall.h" #include "assemblynative.hpp" #include "typeparse.h" #include "reflectioninvocation.h" #include "runtimehandles.h" #include "typestring.h" typedef InlineFactory<InlineSString<64>, 16> SStringFactory; /*static*/ TypeHandle Attribute::GetTypeForEnum(LPCUTF8 szEnumName, COUNT_T cbEnumName, DomainAssembly* pDomainAssembly) { CONTRACTL { PRECONDITION(CheckPointer(pDomainAssembly)); PRECONDITION(CheckPointer(szEnumName)); PRECONDITION(cbEnumName); THROWS; GC_TRIGGERS; MODE_ANY; } CONTRACTL_END; StackScratchBuffer buff; StackSString sszEnumName(SString::Utf8, szEnumName, cbEnumName); return TypeName::GetTypeUsingCASearchRules(sszEnumName.GetUTF8(buff), pDomainAssembly->GetAssembly()); } /*static*/ HRESULT Attribute::ParseCaType( CustomAttributeParser &ca, CaType* pCaType, DomainAssembly* pDomainAssembly, StackSString* ss) { WRAPPER_NO_CONTRACT; HRESULT hr = S_OK; IfFailGo(::ParseEncodedType(ca, pCaType)); if (pCaType->tag == SERIALIZATION_TYPE_ENUM || (pCaType->tag == SERIALIZATION_TYPE_SZARRAY && pCaType->arrayType == SERIALIZATION_TYPE_ENUM )) { TypeHandle th = Attribute::GetTypeForEnum(pCaType->szEnumName, pCaType->cEnumName, pDomainAssembly); if (!th.IsNull() && th.IsEnum()) { pCaType->enumType = (CorSerializationType)th.GetVerifierCorElementType(); // The assembly qualified name of th might not equal pCaType->szEnumName. // e.g. th could be "MyEnum, MyAssembly, Version=4.0.0.0" while // pCaType->szEnumName is "MyEnum, MyAssembly, Version=3.0.0.0" if (ss) { DWORD format = TypeString::FormatNamespace | TypeString::FormatFullInst | TypeString::FormatAssembly; TypeString::AppendType(*ss, th, format); } } else { MAKE_WIDEPTR_FROMUTF8N(pWideStr, pCaType->szEnumName, pCaType->cEnumName) IfFailGo(PostError(META_E_CA_UNEXPECTED_TYPE, wcslen(pWideStr), pWideStr)); } } ErrExit: return hr; } /*static*/ void Attribute::SetBlittableCaValue(CustomAttributeValue* pVal, CaValue* pCaVal, BOOL* pbAllBlittableCa) { WRAPPER_NO_CONTRACT; CorSerializationType type = pCaVal->type.tag; pVal->m_type.m_tag = pCaVal->type.tag; pVal->m_type.m_arrayType = pCaVal->type.arrayType; pVal->m_type.m_enumType = pCaVal->type.enumType; pVal->m_rawValue = 0; if (type == SERIALIZATION_TYPE_STRING || type == SERIALIZATION_TYPE_SZARRAY || type == SERIALIZATION_TYPE_TYPE) { *pbAllBlittableCa = FALSE; } else { // Enum arg -> Object param if (type == SERIALIZATION_TYPE_ENUM && pCaVal->type.cEnumName) *pbAllBlittableCa = FALSE; pVal->m_rawValue = pCaVal->i8; } } /*static*/ void Attribute::SetManagedValue(CustomAttributeManagedValues gc, CustomAttributeValue* pValue) { WRAPPER_NO_CONTRACT; CorSerializationType type = pValue->m_type.m_tag; if (type == SERIALIZATION_TYPE_TYPE || type == SERIALIZATION_TYPE_STRING) { SetObjectReference((OBJECTREF*)&pValue->m_enumOrTypeName, gc.string); } else if (type == SERIALIZATION_TYPE_ENUM) { SetObjectReference((OBJECTREF*)&pValue->m_type.m_enumName, gc.string); } else if (type == SERIALIZATION_TYPE_SZARRAY) { SetObjectReference((OBJECTREF*)&pValue->m_value, gc.array); if (pValue->m_type.m_arrayType == SERIALIZATION_TYPE_ENUM) SetObjectReference((OBJECTREF*)&pValue->m_type.m_enumName, gc.string); } } /*static*/ CustomAttributeManagedValues 
Attribute::GetManagedCaValue(CaValue* pCaVal) { WRAPPER_NO_CONTRACT; CustomAttributeManagedValues gc; ZeroMemory(&gc, sizeof(gc)); GCPROTECT_BEGIN(gc) { CorSerializationType type = pCaVal->type.tag; if (type == SERIALIZATION_TYPE_ENUM) { gc.string = StringObject::NewString(pCaVal->type.szEnumName, pCaVal->type.cEnumName); } else if (type == SERIALIZATION_TYPE_STRING) { gc.string = NULL; if (pCaVal->str.pStr) gc.string = StringObject::NewString(pCaVal->str.pStr, pCaVal->str.cbStr); } else if (type == SERIALIZATION_TYPE_TYPE) { gc.string = StringObject::NewString(pCaVal->str.pStr, pCaVal->str.cbStr); } else if (type == SERIALIZATION_TYPE_SZARRAY) { CorSerializationType arrayType = pCaVal->type.arrayType; ULONG length = pCaVal->arr.length; BOOL bAllBlittableCa = arrayType != SERIALIZATION_TYPE_ENUM; if (arrayType == SERIALIZATION_TYPE_ENUM) gc.string = StringObject::NewString(pCaVal->type.szEnumName, pCaVal->type.cEnumName); if (length != (ULONG)-1) { gc.array = (CaValueArrayREF)AllocateSzArray(TypeHandle(CoreLibBinder::GetClass(CLASS__CUSTOM_ATTRIBUTE_ENCODED_ARGUMENT)).MakeSZArray(), length); CustomAttributeValue* pValues = gc.array->GetDirectPointerToNonObjectElements(); for (COUNT_T i = 0; i < length; i ++) Attribute::SetBlittableCaValue(&pValues[i], &pCaVal->arr[i], &bAllBlittableCa); if (!bAllBlittableCa) { for (COUNT_T i = 0; i < length; i ++) { CustomAttributeManagedValues managedCaValue = Attribute::GetManagedCaValue(&pCaVal->arr[i]); Attribute::SetManagedValue( managedCaValue, &gc.array->GetDirectPointerToNonObjectElements()[i]); } } } } } GCPROTECT_END(); return gc; } /*static*/ HRESULT Attribute::ParseAttributeArgumentValues( void* pCa, INT32 cCa, CaValueArrayFactory* pCaValueArrayFactory, CaArg* pCaArgs, COUNT_T cArgs, CaNamedArg* pCaNamedArgs, COUNT_T cNamedArgs, DomainAssembly* pDomainAssembly) { WRAPPER_NO_CONTRACT; HRESULT hr = S_OK; CustomAttributeParser cap(pCa, cCa); IfFailGo(Attribute::ParseCaCtorArgs(cap, pCaArgs, cArgs, pCaValueArrayFactory, pDomainAssembly)); IfFailGo(Attribute::ParseCaNamedArgs(cap, pCaNamedArgs, cNamedArgs, pCaValueArrayFactory, pDomainAssembly)); ErrExit: return hr; } //--------------------------------------------------------------------------------------- // // Helper to parse the values for the ctor argument list and the named argument list. // HRESULT Attribute::ParseCaValue( CustomAttributeParser &ca, CaValue* pCaArg, CaType* pCaParam, CaValueArrayFactory* pCaValueArrayFactory, DomainAssembly* pDomainAssembly) { CONTRACTL { PRECONDITION(CheckPointer(pCaArg)); PRECONDITION(CheckPointer(pCaParam)); PRECONDITION(CheckPointer(pCaValueArrayFactory)); THROWS; } CONTRACTL_END; HRESULT hr = S_OK; CorSerializationType underlyingType; CaType elementType; if (pCaParam->tag == SERIALIZATION_TYPE_TAGGED_OBJECT) IfFailGo(Attribute::ParseCaType(ca, &pCaArg->type, pDomainAssembly)); else pCaArg->type = *pCaParam; underlyingType = pCaArg->type.tag == SERIALIZATION_TYPE_ENUM ? pCaArg->type.enumType : pCaArg->type.tag; // Grab the value. 
switch (underlyingType) { case SERIALIZATION_TYPE_BOOLEAN: case SERIALIZATION_TYPE_I1: case SERIALIZATION_TYPE_U1: IfFailGo(ca.GetU1(&pCaArg->u1)); break; case SERIALIZATION_TYPE_CHAR: case SERIALIZATION_TYPE_I2: case SERIALIZATION_TYPE_U2: IfFailGo(ca.GetU2(&pCaArg->u2)); break; case SERIALIZATION_TYPE_I4: case SERIALIZATION_TYPE_U4: IfFailGo(ca.GetU4(&pCaArg->u4)); break; case SERIALIZATION_TYPE_I8: case SERIALIZATION_TYPE_U8: IfFailGo(ca.GetU8(&pCaArg->u8)); break; case SERIALIZATION_TYPE_R4: IfFailGo(ca.GetR4(&pCaArg->r4)); break; case SERIALIZATION_TYPE_R8: IfFailGo(ca.GetR8(&pCaArg->r8)); break; case SERIALIZATION_TYPE_STRING: case SERIALIZATION_TYPE_TYPE: IfFailGo(ca.GetString(&pCaArg->str.pStr, &pCaArg->str.cbStr)); break; case SERIALIZATION_TYPE_SZARRAY: UINT32 len; IfFailGo(ca.GetU4(&len)); pCaArg->arr.length = len; pCaArg->arr.pSArray = NULL; if (pCaArg->arr.length == (ULONG)-1) break; IfNullGo(pCaArg->arr.pSArray = pCaValueArrayFactory->Create()); elementType.Init(pCaArg->type.arrayType, SERIALIZATION_TYPE_UNDEFINED, pCaArg->type.enumType, pCaArg->type.szEnumName, pCaArg->type.cEnumName); for (ULONG i = 0; i < pCaArg->arr.length; i++) IfFailGo(Attribute::ParseCaValue(ca, &*pCaArg->arr.pSArray->Append(), &elementType, pCaValueArrayFactory, pDomainAssembly)); break; default: // The format of the custom attribute record is invalid. hr = E_FAIL; break; } // End switch ErrExit: return hr; } /*static*/ HRESULT Attribute::ParseCaCtorArgs( CustomAttributeParser &ca, CaArg* pArgs, ULONG cArgs, CaValueArrayFactory* pCaValueArrayFactory, DomainAssembly* pDomainAssembly) { WRAPPER_NO_CONTRACT; HRESULT hr = S_OK; // A result. ULONG ix; // Loop control. // If there is a blob, check the prolog. if (FAILED(ca.ValidateProlog())) { IfFailGo(PostError(META_E_CA_INVALID_BLOB)); } // For each expected arg... for (ix=0; ix<cArgs; ++ix) { CaArg* pArg = &pArgs[ix]; IfFailGo(Attribute::ParseCaValue(ca, &pArg->val, &pArg->type, pCaValueArrayFactory, pDomainAssembly)); } ErrExit: return hr; } //--------------------------------------------------------------------------------------- // // Because ParseKnowCaNamedArgs MD cannot have VM dependency, we have our own implementation here: // 1. It needs to load the assemblies that contain the enum types for the named arguments, // 2. It Compares the enum type name with that of the loaded enum type, not the one in the CA record. // /*static*/ HRESULT Attribute::ParseCaNamedArgs( CustomAttributeParser &ca, CaNamedArg *pNamedParams, ULONG cNamedParams, CaValueArrayFactory* pCaValueArrayFactory, DomainAssembly* pDomainAssembly) { CONTRACTL { PRECONDITION(CheckPointer(pCaValueArrayFactory)); PRECONDITION(CheckPointer(pDomainAssembly)); THROWS; } CONTRACTL_END; HRESULT hr = S_OK; ULONG ixParam; INT32 ixArg; INT16 cActualArgs; CaNamedArgCtor namedArg; CaNamedArg* pNamedParam; // Get actual count of named arguments. if (FAILED(ca.GetI2(&cActualArgs))) cActualArgs = 0; // Everett behavior for (ixParam = 0; ixParam < cNamedParams; ixParam++) pNamedParams[ixParam].val.type.tag = SERIALIZATION_TYPE_UNDEFINED; // For each named argument... for (ixArg = 0; ixArg < cActualArgs; ixArg++) { // Field or property? 
IfFailGo(ca.GetTag(&namedArg.propertyOrField)); if (namedArg.propertyOrField != SERIALIZATION_TYPE_FIELD && namedArg.propertyOrField != SERIALIZATION_TYPE_PROPERTY) IfFailGo(PostError(META_E_CA_INVALID_ARGTYPE)); // Get argument type information CaType* pNamedArgType = &namedArg.type; StackSString ss; IfFailGo(Attribute::ParseCaType(ca, pNamedArgType, pDomainAssembly, &ss)); LPCSTR szLoadedEnumName = NULL; StackScratchBuffer buff; if (pNamedArgType->tag == SERIALIZATION_TYPE_ENUM || (pNamedArgType->tag == SERIALIZATION_TYPE_SZARRAY && pNamedArgType->arrayType == SERIALIZATION_TYPE_ENUM )) { szLoadedEnumName = ss.GetUTF8(buff); } // Get name of Arg. if (FAILED(ca.GetNonEmptyString(&namedArg.szName, &namedArg.cName))) IfFailGo(PostError(META_E_CA_INVALID_BLOB)); // Match arg by name and type for (ixParam = 0; ixParam < cNamedParams; ixParam++) { pNamedParam = &pNamedParams[ixParam]; // Match type if (pNamedParam->type.tag != SERIALIZATION_TYPE_TAGGED_OBJECT) { if (namedArg.type.tag != pNamedParam->type.tag) continue; // Match array type if (namedArg.type.tag == SERIALIZATION_TYPE_SZARRAY && pNamedParam->type.arrayType != SERIALIZATION_TYPE_TAGGED_OBJECT && namedArg.type.arrayType != pNamedParam->type.arrayType) continue; } // Match name (and its length to avoid substring matching) if ((pNamedParam->cName != namedArg.cName) || (strncmp(pNamedParam->szName, namedArg.szName, namedArg.cName) != 0)) { continue; } // If enum, match enum name. if (pNamedParam->type.tag == SERIALIZATION_TYPE_ENUM || (pNamedParam->type.tag == SERIALIZATION_TYPE_SZARRAY && pNamedParam->type.arrayType == SERIALIZATION_TYPE_ENUM )) { // pNamedParam->type.szEnumName: module->CA record->ctor token->loaded type->field/property->field/property type->field/property type name // namedArg.type.szEnumName: module->CA record->named arg->enum type name // szLoadedEnumName: module->CA record->named arg->enum type name->loaded enum type->loaded enum type name // Comparing pNamedParam->type.szEnumName against namedArg.type.szEnumName could fail if we loaded a different version // of the enum type than the one specified in the CA record. So we are comparing it against szLoadedEnumName instead. if (strncmp(pNamedParam->type.szEnumName, szLoadedEnumName, pNamedParam->type.cEnumName) != 0) continue; if (namedArg.type.enumType != pNamedParam->type.enumType) { MAKE_WIDEPTR_FROMUTF8N(pWideStr, pNamedParam->type.szEnumName, pNamedParam->type.cEnumName) IfFailGo(PostError(META_E_CA_UNEXPECTED_TYPE, wcslen(pWideStr), pWideStr)); } // TODO: For now assume the property\field array size is correct - later we should verify this } // Found a match. break; } // Better have found an argument. if (ixParam == cNamedParams) { MAKE_WIDEPTR_FROMUTF8N(pWideStr, namedArg.szName, namedArg.cName) IfFailGo(PostError(META_E_CA_UNKNOWN_ARGUMENT, wcslen(pWideStr), pWideStr)); } // Argument had better not have been seen already. 
if (pNamedParams[ixParam].val.type.tag != SERIALIZATION_TYPE_UNDEFINED) { MAKE_WIDEPTR_FROMUTF8N(pWideStr, namedArg.szName, namedArg.cName) IfFailGo(PostError(META_E_CA_REPEATED_ARG, wcslen(pWideStr), pWideStr)); } IfFailGo(Attribute::ParseCaValue(ca, &pNamedParams[ixParam].val, &namedArg.type, pCaValueArrayFactory, pDomainAssembly)); } ErrExit: return hr; } /*static*/ HRESULT Attribute::InitCaType(CustomAttributeType* pType, Factory<SString>* pSstringFactory, Factory<StackScratchBuffer>* pStackScratchBufferFactory, CaType* pCaType) { CONTRACTL { THROWS; PRECONDITION(CheckPointer(pType)); PRECONDITION(CheckPointer(pSstringFactory)); PRECONDITION(CheckPointer(pStackScratchBufferFactory)); PRECONDITION(CheckPointer(pCaType)); } CONTRACTL_END; HRESULT hr = S_OK; SString* psszName = NULL; StackScratchBuffer* scratchBuffer = NULL; IfNullGo(psszName = pSstringFactory->Create()); IfNullGo(scratchBuffer = pStackScratchBufferFactory->Create()); psszName->Set(pType->m_enumName == NULL ? NULL : pType->m_enumName->GetBuffer()); pCaType->Init( pType->m_tag, pType->m_arrayType, pType->m_enumType, psszName->GetUTF8(*scratchBuffer), (ULONG)psszName->GetCount()); ErrExit: return hr; } FCIMPL5(VOID, Attribute::ParseAttributeArguments, void* pCa, INT32 cCa, CaArgArrayREF* ppCustomAttributeArguments, CaNamedArgArrayREF* ppCustomAttributeNamedArguments, AssemblyBaseObject* pAssemblyUNSAFE) { FCALL_CONTRACT; ASSEMBLYREF refAssembly = (ASSEMBLYREF)ObjectToOBJECTREF(pAssemblyUNSAFE); HELPER_METHOD_FRAME_BEGIN_1(refAssembly) { DomainAssembly *pDomainAssembly = refAssembly->GetDomainAssembly(); struct { CustomAttributeArgument* pArgs; CustomAttributeNamedArgument* pNamedArgs; } gc; gc.pArgs = NULL; gc.pNamedArgs = NULL; HRESULT hr = S_OK; GCPROTECT_BEGININTERIOR(gc); BOOL bAllBlittableCa = TRUE; COUNT_T cArgs = 0; COUNT_T cNamedArgs = 0; CaArg* pCaArgs = NULL; CaNamedArg* pCaNamedArgs = NULL; #ifdef __GNUC__ // When compiling under GCC we have to use the -fstack-check option to ensure we always spot stack // overflow. But this option is intolerant of locals growing too large, so we have to cut back a bit // on what we can allocate inline here. Leave the Windows versions alone to retain the perf benefits // since we don't have the same constraints. NewHolder<CaValueArrayFactory> pCaValueArrayFactory = new InlineFactory<SArray<CaValue>, 4>(); InlineFactory<StackScratchBuffer, 4> stackScratchBufferFactory; InlineFactory<SString, 4> sstringFactory; #else // __GNUC__ // Preallocate 4 elements in each of the following factories for optimal performance. // 4 is enough for 4 typed args or 2 named args which are enough for 99% of the cases. // SArray<CaValue> is only needed if a argument is an array, don't preallocate any memory as arrays are rare. 
// Need one per (ctor or named) arg + one per array element InlineFactory<SArray<CaValue>, 4> caValueArrayFactory; InlineFactory<SArray<CaValue>, 4> *pCaValueArrayFactory = &caValueArrayFactory; // Need one StackScratchBuffer per ctor arg and two per named arg InlineFactory<StackScratchBuffer, 4> stackScratchBufferFactory; // Need one SString per ctor arg and two per named arg InlineFactory<SString, 4> sstringFactory; #endif // __GNUC__ cArgs = (*ppCustomAttributeArguments)->GetNumComponents(); if (cArgs) { gc.pArgs = (*ppCustomAttributeArguments)->GetDirectPointerToNonObjectElements(); size_t size = sizeof(CaArg) * cArgs; if ((size / sizeof(CaArg)) != cArgs) // uint over/underflow IfFailGo(E_INVALIDARG); pCaArgs = (CaArg*)_alloca(size); for (COUNT_T i = 0; i < cArgs; i ++) { CaType caType; IfFailGo(Attribute::InitCaType(&gc.pArgs[i].m_type, &sstringFactory, &stackScratchBufferFactory, &caType)); pCaArgs[i].Init(caType); } } cNamedArgs = (*ppCustomAttributeNamedArguments)->GetNumComponents(); if (cNamedArgs) { gc.pNamedArgs = (*ppCustomAttributeNamedArguments)->GetDirectPointerToNonObjectElements(); size_t size = sizeof(CaNamedArg) * cNamedArgs; if ((size / sizeof(CaNamedArg)) != cNamedArgs) // uint over/underflow IfFailGo(E_INVALIDARG); pCaNamedArgs = (CaNamedArg*)_alloca(size); for (COUNT_T i = 0; i < cNamedArgs; i ++) { CustomAttributeNamedArgument* pNamedArg = &gc.pNamedArgs[i]; CaType caType; IfFailGo(Attribute::InitCaType(&pNamedArg->m_type, &sstringFactory, &stackScratchBufferFactory, &caType)); SString* psszName = NULL; IfNullGo(psszName = sstringFactory.Create()); psszName->Set(pNamedArg->m_argumentName->GetBuffer()); StackScratchBuffer* scratchBuffer = NULL; IfNullGo(scratchBuffer = stackScratchBufferFactory.Create()); pCaNamedArgs[i].Init( psszName->GetUTF8(*scratchBuffer), pNamedArg->m_propertyOrField, caType); } } // This call maps the named parameters (fields and properties) and ctor parameters to the arguments in the CA record // and retrieves their values. IfFailGo(Attribute::ParseAttributeArgumentValues(pCa, cCa, pCaValueArrayFactory, pCaArgs, cArgs, pCaNamedArgs, cNamedArgs, pDomainAssembly)); for (COUNT_T i = 0; i < cArgs; i ++) Attribute::SetBlittableCaValue(&gc.pArgs[i].m_value, &pCaArgs[i].val, &bAllBlittableCa); for (COUNT_T i = 0; i < cNamedArgs; i ++) Attribute::SetBlittableCaValue(&gc.pNamedArgs[i].m_value, &pCaNamedArgs[i].val, &bAllBlittableCa); if (!bAllBlittableCa) { for (COUNT_T i = 0; i < cArgs; i ++) { CustomAttributeManagedValues managedCaValue = Attribute::GetManagedCaValue(&pCaArgs[i].val); Attribute::SetManagedValue(managedCaValue, &(gc.pArgs[i].m_value)); } for (COUNT_T i = 0; i < cNamedArgs; i++) { CustomAttributeManagedValues managedCaValue = Attribute::GetManagedCaValue(&pCaNamedArgs[i].val); Attribute::SetManagedValue(managedCaValue, &(gc.pNamedArgs[i].m_value)); } } ErrExit: ; // Need empty statement to get GCPROTECT_END below to work. 
GCPROTECT_END(); if (hr != S_OK) { if ((hr == E_OUTOFMEMORY) || (hr == NTE_NO_MEMORY)) { COMPlusThrow(kOutOfMemoryException); } else { COMPlusThrow(kCustomAttributeFormatException); } } } HELPER_METHOD_FRAME_END(); } FCIMPLEND FCIMPL6(LPVOID, COMCustomAttribute::CreateCaObject, ReflectModuleBaseObject* pAttributedModuleUNSAFE, ReflectClassBaseObject* pCaTypeUNSAFE, ReflectMethodObject *pMethodUNSAFE, BYTE** ppBlob, BYTE* pEndBlob, INT32* pcNamedArgs) { FCALL_CONTRACT; struct { REFLECTCLASSBASEREF refCaType; OBJECTREF ca; REFLECTMETHODREF refCtor; REFLECTMODULEBASEREF refAttributedModule; } gc; gc.refCaType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pCaTypeUNSAFE); TypeHandle th = gc.refCaType->GetType(); gc.ca = NULL; gc.refCtor = (REFLECTMETHODREF)ObjectToOBJECTREF(pMethodUNSAFE); gc.refAttributedModule = (REFLECTMODULEBASEREF)ObjectToOBJECTREF(pAttributedModuleUNSAFE); if(gc.refAttributedModule == NULL) FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle")); MethodDesc* pCtorMD = gc.refCtor->GetMethod(); HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc); { MethodDescCallSite ctorCallSite(pCtorMD, th); MetaSig* pSig = ctorCallSite.GetMetaSig(); BYTE* pBlob = *ppBlob; // get the number of arguments and allocate an array for the args ARG_SLOT *args = NULL; UINT cArgs = pSig->NumFixedArgs() + 1; // make room for the this pointer UINT i = 1; // used to flag that we actually get the right number of args from the blob args = (ARG_SLOT*)_alloca(cArgs * sizeof(ARG_SLOT)); memset((void*)args, 0, cArgs * sizeof(ARG_SLOT)); OBJECTREF *argToProtect = (OBJECTREF*)_alloca(cArgs * sizeof(OBJECTREF)); memset((void*)argToProtect, 0, cArgs * sizeof(OBJECTREF)); // load the this pointer argToProtect[0] = gc.refCaType->GetType().GetMethodTable()->Allocate(); // this is the value to return after the ctor invocation if (pBlob) { if (pBlob < pEndBlob) { if (pBlob + 2 > pEndBlob) { COMPlusThrow(kCustomAttributeFormatException); } INT16 prolog = GET_UNALIGNED_VAL16(pBlob); if (prolog != 1) COMPlusThrow(kCustomAttributeFormatException); pBlob += 2; } if (cArgs > 1) { GCPROTECT_ARRAY_BEGIN(*argToProtect, cArgs); { // loop through the args for (i = 1; i < cArgs; i++) { CorElementType type = pSig->NextArg(); if (type == ELEMENT_TYPE_END) break; BOOL bObjectCreated = FALSE; TypeHandle th = pSig->GetLastTypeHandleThrowing(); if (th.IsArray()) // get the array element th = th.GetArrayElementTypeHandle(); ARG_SLOT data = GetDataFromBlob(pCtorMD->GetAssembly(), (CorSerializationType)type, th, &pBlob, pEndBlob, gc.refAttributedModule->GetModule(), &bObjectCreated); if (bObjectCreated) argToProtect[i] = ArgSlotToObj(data); else args[i] = data; } } GCPROTECT_END(); // We have borrowed the signature from MethodDescCallSite. We have to put it back into the initial position // because that's where MethodDescCallSite expects to find it below. 
pSig->Reset(); for (i = 1; i < cArgs; i++) { if (argToProtect[i] != NULL) { _ASSERTE(args[i] == NULL); args[i] = ObjToArgSlot(argToProtect[i]); } } } } args[0] = ObjToArgSlot(argToProtect[0]); if (i != cArgs) COMPlusThrow(kCustomAttributeFormatException); // check if there are any named properties to invoke, // if so set the by ref int passed in to point // to the blob position where name properties start *pcNamedArgs = 0; if (pBlob && pBlob != pEndBlob) { if (pBlob + 2 > pEndBlob) COMPlusThrow(kCustomAttributeFormatException); *pcNamedArgs = GET_UNALIGNED_VAL16(pBlob); pBlob += 2; } *ppBlob = pBlob; if (*pcNamedArgs == 0 && pBlob != pEndBlob) COMPlusThrow(kCustomAttributeFormatException); // make the invocation to the ctor gc.ca = ArgSlotToObj(args[0]); if (pCtorMD->GetMethodTable()->IsValueType()) args[0] = PtrToArgSlot(OBJECTREFToObject(gc.ca)->UnBox()); ctorCallSite.CallWithValueTypes(args); } HELPER_METHOD_FRAME_END(); return OBJECTREFToObject(gc.ca); } FCIMPLEND FCIMPL5(VOID, COMCustomAttribute::ParseAttributeUsageAttribute, PVOID pData, ULONG cData, ULONG* pTargets, CLR_BOOL* pInherited, CLR_BOOL* pAllowMultiple) { FCALL_CONTRACT; int inherited = 0; int allowMultiple = 1; { CustomAttributeParser ca(pData, cData); CaArg args[1]; args[0].InitEnum(SERIALIZATION_TYPE_I4, 0); if (FAILED(::ParseKnownCaArgs(ca, args, ARRAY_SIZE(args)))) { HELPER_METHOD_FRAME_BEGIN_0(); COMPlusThrow(kCustomAttributeFormatException); HELPER_METHOD_FRAME_END(); } *pTargets = args[0].val.u4; CaNamedArg namedArgs[2]; CaType namedArgTypes[2]; namedArgTypes[inherited].Init(SERIALIZATION_TYPE_BOOLEAN); namedArgTypes[allowMultiple].Init(SERIALIZATION_TYPE_BOOLEAN); namedArgs[inherited].Init("Inherited", SERIALIZATION_TYPE_PROPERTY, namedArgTypes[inherited], TRUE); namedArgs[allowMultiple].Init("AllowMultiple", SERIALIZATION_TYPE_PROPERTY, namedArgTypes[allowMultiple], FALSE); if (FAILED(::ParseKnownCaNamedArgs(ca, namedArgs, ARRAY_SIZE(namedArgs)))) { HELPER_METHOD_FRAME_BEGIN_0(); COMPlusThrow(kCustomAttributeFormatException); HELPER_METHOD_FRAME_END(); } *pInherited = namedArgs[inherited].val.boolean == TRUE; *pAllowMultiple = namedArgs[allowMultiple].val.boolean == TRUE; } } FCIMPLEND FCIMPL7(void, COMCustomAttribute::GetPropertyOrFieldData, ReflectModuleBaseObject *pModuleUNSAFE, BYTE** ppBlobStart, BYTE* pBlobEnd, STRINGREF* pName, CLR_BOOL* pbIsProperty, OBJECTREF* pType, OBJECTREF* value) { FCALL_CONTRACT; BYTE* pBlob = *ppBlobStart; *pType = NULL; REFLECTMODULEBASEREF refModule = (REFLECTMODULEBASEREF)ObjectToOBJECTREF(pModuleUNSAFE); if(refModule == NULL) FCThrowResVoid(kArgumentNullException, W("Arg_InvalidHandle")); Module *pModule = refModule->GetModule(); HELPER_METHOD_FRAME_BEGIN_1(refModule); { Assembly *pCtorAssembly = NULL; MethodTable *pMTValue = NULL; CorSerializationType arrayType = SERIALIZATION_TYPE_BOOLEAN; BOOL bObjectCreated = FALSE; TypeHandle nullTH; if (pBlob + 2 > pBlobEnd) COMPlusThrow(kCustomAttributeFormatException); // get whether it is a field or a property CorSerializationType propOrField = (CorSerializationType)*pBlob; pBlob++; if (propOrField == SERIALIZATION_TYPE_FIELD) *pbIsProperty = FALSE; else if (propOrField == SERIALIZATION_TYPE_PROPERTY) *pbIsProperty = TRUE; else COMPlusThrow(kCustomAttributeFormatException); // get the type of the field CorSerializationType fieldType = (CorSerializationType)*pBlob; pBlob++; if (fieldType == SERIALIZATION_TYPE_SZARRAY) { arrayType = (CorSerializationType)*pBlob; if (pBlob + 1 > pBlobEnd) COMPlusThrow(kCustomAttributeFormatException); 
pBlob++; } if (fieldType == SERIALIZATION_TYPE_ENUM || arrayType == SERIALIZATION_TYPE_ENUM) { // get the enum type ReflectClassBaseObject *pEnum = (ReflectClassBaseObject*)OBJECTREFToObject(ArgSlotToObj(GetDataFromBlob( pCtorAssembly, SERIALIZATION_TYPE_TYPE, nullTH, &pBlob, pBlobEnd, pModule, &bObjectCreated))); if (pEnum == NULL) COMPlusThrow(kCustomAttributeFormatException); _ASSERTE(bObjectCreated); TypeHandle th = pEnum->GetType(); _ASSERTE(th.IsEnum()); pMTValue = th.AsMethodTable(); if (fieldType == SERIALIZATION_TYPE_ENUM) // load the enum type to pass it back *pType = th.GetManagedClassObject(); else nullTH = th; } // get the string representing the field/property name *pName = ArgSlotToString(GetDataFromBlob( pCtorAssembly, SERIALIZATION_TYPE_STRING, nullTH, &pBlob, pBlobEnd, pModule, &bObjectCreated)); _ASSERTE(bObjectCreated || *pName == NULL); // create the object and return it switch (fieldType) { case SERIALIZATION_TYPE_TAGGED_OBJECT: *pType = g_pObjectClass->GetManagedClassObject(); FALLTHROUGH; case SERIALIZATION_TYPE_TYPE: case SERIALIZATION_TYPE_STRING: *value = ArgSlotToObj(GetDataFromBlob( pCtorAssembly, fieldType, nullTH, &pBlob, pBlobEnd, pModule, &bObjectCreated)); _ASSERTE(bObjectCreated || *value == NULL); if (*value == NULL) { // load the proper type so that code in managed knows which property to load if (fieldType == SERIALIZATION_TYPE_STRING) *pType = CoreLibBinder::GetElementType(ELEMENT_TYPE_STRING)->GetManagedClassObject(); else if (fieldType == SERIALIZATION_TYPE_TYPE) *pType = CoreLibBinder::GetClass(CLASS__TYPE)->GetManagedClassObject(); } break; case SERIALIZATION_TYPE_SZARRAY: { *value = NULL; int arraySize = (int)GetDataFromBlob(pCtorAssembly, SERIALIZATION_TYPE_I4, nullTH, &pBlob, pBlobEnd, pModule, &bObjectCreated); if (arraySize != -1) { _ASSERTE(!bObjectCreated); if (arrayType == SERIALIZATION_TYPE_STRING) nullTH = TypeHandle(CoreLibBinder::GetElementType(ELEMENT_TYPE_STRING)); else if (arrayType == SERIALIZATION_TYPE_TYPE) nullTH = TypeHandle(CoreLibBinder::GetClass(CLASS__TYPE)); else if (arrayType == SERIALIZATION_TYPE_TAGGED_OBJECT) nullTH = TypeHandle(g_pObjectClass); ReadArray(pCtorAssembly, arrayType, arraySize, nullTH, &pBlob, pBlobEnd, pModule, (BASEARRAYREF*)value); } if (*value == NULL) { TypeHandle arrayTH; switch (arrayType) { case SERIALIZATION_TYPE_STRING: arrayTH = TypeHandle(CoreLibBinder::GetElementType(ELEMENT_TYPE_STRING)); break; case SERIALIZATION_TYPE_TYPE: arrayTH = TypeHandle(CoreLibBinder::GetClass(CLASS__TYPE)); break; case SERIALIZATION_TYPE_TAGGED_OBJECT: arrayTH = TypeHandle(g_pObjectClass); break; default: if (SERIALIZATION_TYPE_BOOLEAN <= arrayType && arrayType <= SERIALIZATION_TYPE_R8) arrayTH = TypeHandle(CoreLibBinder::GetElementType((CorElementType)arrayType)); } if (!arrayTH.IsNull()) { arrayTH = ClassLoader::LoadArrayTypeThrowing(arrayTH); *pType = arrayTH.GetManagedClassObject(); } } break; } default: if (SERIALIZATION_TYPE_BOOLEAN <= fieldType && fieldType <= SERIALIZATION_TYPE_R8) pMTValue = CoreLibBinder::GetElementType((CorElementType)fieldType); else if(fieldType == SERIALIZATION_TYPE_ENUM) fieldType = (CorSerializationType)pMTValue->GetInternalCorElementType(); else COMPlusThrow(kCustomAttributeFormatException); ARG_SLOT val = GetDataFromBlob(pCtorAssembly, fieldType, nullTH, &pBlob, pBlobEnd, pModule, &bObjectCreated); _ASSERTE(!bObjectCreated); *value = pMTValue->Box((void*)ArgSlotEndianessFixup(&val, pMTValue->GetNumInstanceFieldBytes())); } *ppBlobStart = pBlob; } HELPER_METHOD_FRAME_END(); } 
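// (Illustrative note, not part of the original source.) GetPropertyOrFieldData above consumes exactly one
// named-argument entry from the CA blob: the field/property tag byte (SERIALIZATION_TYPE_FIELD or _PROPERTY),
// the serialized member type (plus an element-type byte for SZARRAY and an assembly-qualified type name for
// enums), the member name as a SerString, and finally the value, which it boxes or materializes into *value
// while advancing *ppBlobStart past the entry for the managed caller.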
FCIMPLEND /*static*/ TypeHandle COMCustomAttribute::GetTypeHandleFromBlob(Assembly *pCtorAssembly, CorSerializationType objType, BYTE **pBlob, const BYTE *endBlob, Module *pModule) { CONTRACTL { THROWS; } CONTRACTL_END; // we must box which means we must get the method table, switch again on the element type MethodTable *pMTType = NULL; TypeHandle nullTH; TypeHandle RtnTypeHnd; switch ((DWORD)objType) { case SERIALIZATION_TYPE_BOOLEAN: case SERIALIZATION_TYPE_I1: case SERIALIZATION_TYPE_U1: case SERIALIZATION_TYPE_CHAR: case SERIALIZATION_TYPE_I2: case SERIALIZATION_TYPE_U2: case SERIALIZATION_TYPE_I4: case SERIALIZATION_TYPE_U4: case SERIALIZATION_TYPE_R4: case SERIALIZATION_TYPE_I8: case SERIALIZATION_TYPE_U8: case SERIALIZATION_TYPE_R8: case SERIALIZATION_TYPE_STRING: pMTType = CoreLibBinder::GetElementType((CorElementType)objType); RtnTypeHnd = TypeHandle(pMTType); break; case ELEMENT_TYPE_CLASS: pMTType = CoreLibBinder::GetClass(CLASS__TYPE); RtnTypeHnd = TypeHandle(pMTType); break; case SERIALIZATION_TYPE_TAGGED_OBJECT: pMTType = g_pObjectClass; RtnTypeHnd = TypeHandle(pMTType); break; case SERIALIZATION_TYPE_TYPE: { int size = GetStringSize(pBlob, endBlob); if (size == -1) return nullTH; if ((size+1 <= 1) || (size > endBlob - *pBlob)) COMPlusThrow(kCustomAttributeFormatException); LPUTF8 szName = (LPUTF8)_alloca(size + 1); memcpy(szName, *pBlob, size); *pBlob += size; szName[size] = 0; RtnTypeHnd = TypeName::GetTypeUsingCASearchRules(szName, pModule->GetAssembly(), NULL, FALSE); break; } case SERIALIZATION_TYPE_ENUM: { // get the enum type BOOL isObject = FALSE; ReflectClassBaseObject *pType = (ReflectClassBaseObject*)OBJECTREFToObject(ArgSlotToObj(GetDataFromBlob(pCtorAssembly, SERIALIZATION_TYPE_TYPE, nullTH, pBlob, endBlob, pModule, &isObject))); if (pType != NULL) { _ASSERTE(isObject); RtnTypeHnd = pType->GetType(); _ASSERTE((objType == SERIALIZATION_TYPE_ENUM) ? RtnTypeHnd.GetMethodTable()->IsEnum() : TRUE); } else { RtnTypeHnd = TypeHandle(); } break; } default: COMPlusThrow(kCustomAttributeFormatException); } return RtnTypeHnd; } // retrieve the string size in a CA blob. Advance the blob pointer to point to // the beginning of the string immediately following the size /*static*/ int COMCustomAttribute::GetStringSize(BYTE **pBlob, const BYTE *endBlob) { CONTRACTL { THROWS; } CONTRACTL_END; if (*pBlob >= endBlob ) { // No buffer at all, or buffer overrun COMPlusThrow(kCustomAttributeFormatException); } if (**pBlob == 0xFF) { // Special case null string. ++(*pBlob); return -1; } ULONG ulSize; if (FAILED(CPackedLen::SafeGetData((BYTE const *)*pBlob, (BYTE const *)endBlob, (ULONG *)&ulSize, (BYTE const **)pBlob))) { COMPlusThrow(kCustomAttributeFormatException); } return (int)ulSize; } // copy the values of an array of integers from a CA blob // (i.e., always stored in little-endian, and needs not be aligned). // Returns TRUE on success, FALSE if the blob was not big enough. // Advances *pBlob by the amount copied. 
/*static*/ template < typename T > BOOL COMCustomAttribute::CopyArrayVAL(BASEARRAYREF pArray, int nElements, BYTE **pBlob, const BYTE *endBlob) { int sizeData; // = size * 2; with integer overflow check if (!ClrSafeInt<int>::multiply(nElements, sizeof(T), sizeData)) return FALSE; if (*pBlob + sizeData < *pBlob) // integer overflow check return FALSE; if (*pBlob + sizeData > endBlob) return FALSE; #if BIGENDIAN T *ptDest = reinterpret_cast<T *>(pArray->GetDataPtr()); for (int iElement = 0; iElement < nElements; iElement++) { T tValue; BYTE *pbSrc = *pBlob + iElement * sizeof(T); BYTE *pbDest = reinterpret_cast<BYTE *>(&tValue); for (size_t iByte = 0; iByte < sizeof(T); iByte++) { pbDest[sizeof(T) - 1 - iByte] = pbSrc[iByte]; } ptDest[iElement] = tValue; } #else // BIGENDIAN memcpyNoGCRefs(pArray->GetDataPtr(), *pBlob, sizeData); #endif // BIGENDIAN *pBlob += sizeData; return TRUE; } // read the whole array as a chunk /*static*/ void COMCustomAttribute::ReadArray(Assembly *pCtorAssembly, CorSerializationType arrayType, int size, TypeHandle th, BYTE **pBlob, const BYTE *endBlob, Module *pModule, BASEARRAYREF *pArray) { CONTRACTL { THROWS; } CONTRACTL_END; ARG_SLOT element = 0; switch ((DWORD)arrayType) { case SERIALIZATION_TYPE_BOOLEAN: case SERIALIZATION_TYPE_I1: case SERIALIZATION_TYPE_U1: *pArray = (BASEARRAYREF)AllocatePrimitiveArray((CorElementType)arrayType, size); if (!CopyArrayVAL<BYTE>(*pArray, size, pBlob, endBlob)) goto badBlob; break; case SERIALIZATION_TYPE_CHAR: case SERIALIZATION_TYPE_I2: case SERIALIZATION_TYPE_U2: { *pArray = (BASEARRAYREF)AllocatePrimitiveArray((CorElementType)arrayType, size); if (!CopyArrayVAL<UINT16>(*pArray, size, pBlob, endBlob)) goto badBlob; break; } case SERIALIZATION_TYPE_I4: case SERIALIZATION_TYPE_U4: case SERIALIZATION_TYPE_R4: { *pArray = (BASEARRAYREF)AllocatePrimitiveArray((CorElementType)arrayType, size); if (!CopyArrayVAL<UINT32>(*pArray, size, pBlob, endBlob)) goto badBlob; break; } case SERIALIZATION_TYPE_I8: case SERIALIZATION_TYPE_U8: case SERIALIZATION_TYPE_R8: { *pArray = (BASEARRAYREF)AllocatePrimitiveArray((CorElementType)arrayType, size); if (!CopyArrayVAL<UINT64>(*pArray, size, pBlob, endBlob)) goto badBlob; break; } case ELEMENT_TYPE_CLASS: case SERIALIZATION_TYPE_TYPE: case SERIALIZATION_TYPE_STRING: case SERIALIZATION_TYPE_SZARRAY: case SERIALIZATION_TYPE_TAGGED_OBJECT: { BOOL isObject; // If we haven't figured out the type of the array, throw bad blob exception if (th.IsNull()) goto badBlob; *pArray = (BASEARRAYREF)AllocateObjectArray(size, th); if (arrayType == SERIALIZATION_TYPE_SZARRAY) // switch the th to be the proper one th = th.GetArrayElementTypeHandle(); for (int i = 0; i < size; i++) { element = GetDataFromBlob(pCtorAssembly, arrayType, th, pBlob, endBlob, pModule, &isObject); _ASSERTE(isObject || element == NULL); ((PTRARRAYREF)(*pArray))->SetAt(i, ArgSlotToObj(element)); } break; } case SERIALIZATION_TYPE_ENUM: { INT32 bounds = size; // If we haven't figured out the type of the array, throw bad blob exception if (th.IsNull()) goto badBlob; unsigned elementSize = th.GetSize(); TypeHandle arrayHandle = ClassLoader::LoadArrayTypeThrowing(th); if (arrayHandle.IsNull()) goto badBlob; *pArray = (BASEARRAYREF)AllocateSzArray(arrayHandle, bounds); BOOL fSuccess; switch (elementSize) { case 1: fSuccess = CopyArrayVAL<BYTE>(*pArray, size, pBlob, endBlob); break; case 2: fSuccess = CopyArrayVAL<UINT16>(*pArray, size, pBlob, endBlob); break; case 4: fSuccess = CopyArrayVAL<UINT32>(*pArray, size, pBlob, endBlob); break; case 8: 
fSuccess = CopyArrayVAL<UINT64>(*pArray, size, pBlob, endBlob); break; default: fSuccess = FALSE; } if (!fSuccess) goto badBlob; break; } default: badBlob: COMPlusThrow(kCustomAttributeFormatException); } } // get data out of the blob according to a CorElementType /*static*/ ARG_SLOT COMCustomAttribute::GetDataFromBlob(Assembly *pCtorAssembly, CorSerializationType type, TypeHandle th, BYTE **pBlob, const BYTE *endBlob, Module *pModule, BOOL *bObjectCreated) { CONTRACTL { THROWS; } CONTRACTL_END; ARG_SLOT retValue = 0; *bObjectCreated = FALSE; TypeHandle nullTH; TypeHandle typeHnd; switch ((DWORD)type) { case SERIALIZATION_TYPE_BOOLEAN: case SERIALIZATION_TYPE_I1: case SERIALIZATION_TYPE_U1: if (*pBlob + 1 <= endBlob) { retValue = (ARG_SLOT)**pBlob; *pBlob += 1; break; } goto badBlob; case SERIALIZATION_TYPE_CHAR: case SERIALIZATION_TYPE_I2: case SERIALIZATION_TYPE_U2: if (*pBlob + 2 <= endBlob) { retValue = (ARG_SLOT)GET_UNALIGNED_VAL16(*pBlob); *pBlob += 2; break; } goto badBlob; case SERIALIZATION_TYPE_I4: case SERIALIZATION_TYPE_U4: case SERIALIZATION_TYPE_R4: if (*pBlob + 4 <= endBlob) { retValue = (ARG_SLOT)GET_UNALIGNED_VAL32(*pBlob); *pBlob += 4; break; } goto badBlob; case SERIALIZATION_TYPE_I8: case SERIALIZATION_TYPE_U8: case SERIALIZATION_TYPE_R8: if (*pBlob + 8 <= endBlob) { retValue = (ARG_SLOT)GET_UNALIGNED_VAL64(*pBlob); *pBlob += 8; break; } goto badBlob; case SERIALIZATION_TYPE_STRING: stringType: { int size = GetStringSize(pBlob, endBlob); *bObjectCreated = TRUE; if (size > 0) { if (*pBlob + size < *pBlob) // integer overflow check goto badBlob; if (*pBlob + size > endBlob) goto badBlob; retValue = ObjToArgSlot(StringObject::NewString((LPCUTF8)*pBlob, size)); *pBlob += size; } else if (size == 0) retValue = ObjToArgSlot(StringObject::NewString(0)); else *bObjectCreated = FALSE; break; } // this is coming back from sig but it's not a serialization type, // essentialy the type in the blob and the type in the sig don't match case ELEMENT_TYPE_VALUETYPE: { if (!th.IsEnum()) goto badBlob; CorSerializationType enumType = (CorSerializationType)th.GetInternalCorElementType(); BOOL cannotBeObject = FALSE; retValue = GetDataFromBlob(pCtorAssembly, enumType, nullTH, pBlob, endBlob, pModule, &cannotBeObject); _ASSERTE(!cannotBeObject); break; } // this is coming back from sig but it's not a serialization type, // essentialy the type in the blob and the type in the sig don't match case ELEMENT_TYPE_CLASS: if (th.IsArray()) goto typeArray; else { MethodTable *pMT = th.AsMethodTable(); if (pMT == g_pStringClass) goto stringType; else if (pMT == g_pObjectClass) goto typeObject; else if (CoreLibBinder::IsClass(pMT, CLASS__TYPE)) goto typeType; } goto badBlob; case SERIALIZATION_TYPE_TYPE: typeType: { typeHnd = GetTypeHandleFromBlob(pCtorAssembly, SERIALIZATION_TYPE_TYPE, pBlob, endBlob, pModule); if (!typeHnd.IsNull()) retValue = ObjToArgSlot(typeHnd.GetManagedClassObject()); *bObjectCreated = TRUE; break; } // this is coming back from sig but it's not a serialization type, // essentialy the type in the blob and the type in the sig don't match case ELEMENT_TYPE_OBJECT: case SERIALIZATION_TYPE_TAGGED_OBJECT: typeObject: { // get the byte representing the real type and call GetDataFromBlob again if (*pBlob + 1 > endBlob) goto badBlob; CorSerializationType objType = (CorSerializationType)**pBlob; *pBlob += 1; switch (objType) { case SERIALIZATION_TYPE_SZARRAY: { if (*pBlob + 1 > endBlob) goto badBlob; CorSerializationType arrayType = (CorSerializationType)**pBlob; *pBlob += 1; if (arrayType 
== SERIALIZATION_TYPE_TYPE) arrayType = (CorSerializationType)ELEMENT_TYPE_CLASS; // grab the array type and make a type handle for it nullTH = GetTypeHandleFromBlob(pCtorAssembly, arrayType, pBlob, endBlob, pModule); FALLTHROUGH; } case SERIALIZATION_TYPE_TYPE: case SERIALIZATION_TYPE_STRING: // notice that the nullTH is actually not null in the array case (see case above) retValue = GetDataFromBlob(pCtorAssembly, objType, nullTH, pBlob, endBlob, pModule, bObjectCreated); _ASSERTE(*bObjectCreated || retValue == 0); break; case SERIALIZATION_TYPE_ENUM: { // // get the enum type typeHnd = GetTypeHandleFromBlob(pCtorAssembly, SERIALIZATION_TYPE_ENUM, pBlob, endBlob, pModule); _ASSERTE(typeHnd.IsTypeDesc() == false); // ok we have the class, now we go and read the data MethodTable *pMT = typeHnd.AsMethodTable(); PREFIX_ASSUME(pMT != NULL); CorSerializationType objNormType = (CorSerializationType)pMT->GetInternalCorElementType(); BOOL isObject = FALSE; retValue = GetDataFromBlob(pCtorAssembly, objNormType, nullTH, pBlob, endBlob, pModule, &isObject); _ASSERTE(!isObject); retValue= ObjToArgSlot(pMT->Box((void*)&retValue)); *bObjectCreated = TRUE; break; } default: { // the common primitive type case. We need to box the primitive typeHnd = GetTypeHandleFromBlob(pCtorAssembly, objType, pBlob, endBlob, pModule); _ASSERTE(typeHnd.IsTypeDesc() == false); retValue = GetDataFromBlob(pCtorAssembly, objType, nullTH, pBlob, endBlob, pModule, bObjectCreated); _ASSERTE(!*bObjectCreated); retValue= ObjToArgSlot(typeHnd.AsMethodTable()->Box((void*)&retValue)); *bObjectCreated = TRUE; break; } } break; } case SERIALIZATION_TYPE_SZARRAY: typeArray: { // read size BOOL isObject = FALSE; int size = (int)GetDataFromBlob(pCtorAssembly, SERIALIZATION_TYPE_I4, nullTH, pBlob, endBlob, pModule, &isObject); _ASSERTE(!isObject); if (size != -1) { CorSerializationType arrayType; if (th.IsEnum()) arrayType = SERIALIZATION_TYPE_ENUM; else arrayType = (CorSerializationType)th.GetInternalCorElementType(); BASEARRAYREF array = NULL; GCPROTECT_BEGIN(array); ReadArray(pCtorAssembly, arrayType, size, th, pBlob, endBlob, pModule, &array); retValue = ObjToArgSlot(array); GCPROTECT_END(); } *bObjectCreated = TRUE; break; } default: badBlob: //<TODO> generate a reasonable text string ("invalid blob or constructor")</TODO> COMPlusThrow(kCustomAttributeFormatException); } return retValue; }
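// ---------------------------------------------------------------------------
// Illustrative aside (not part of the original file): the parsers above all
// consume the ECMA-335 custom attribute blob format. As a rough sketch, the
// blob for a hypothetical [MyAttr(42, Name = "hi")] would be laid out as:
//   01 00                 prolog (always 0x0001)
//   2A 00 00 00           fixed ctor arg: int32 42, little-endian
//   01 00                 number of named arguments
//   54 0E                 PROPERTY tag, then SERIALIZATION_TYPE_STRING
//   04 'N' 'a' 'm' 'e'    member name as a SerString (packed length + UTF-8)
//   02 'h' 'i'            value "hi" as a SerString
// Null strings are the single byte 0xFF (see GetStringSize) and null arrays
// are the length 0xFFFFFFFF, which is why (ULONG)-1 is treated as "null" above.
//
// The standalone helpers below are a minimal sketch of the two low-level
// encodings involved (compressed lengths and the prolog); the names are
// hypothetical and they are not used by, nor equivalent to, the CLR code above.
#include <cstdint>

// Decode an ECMA-335 compressed (packed) unsigned integer, as used for
// SerString lengths. Returns false on truncation or an invalid lead byte.
static bool SketchDecodePackedLen(const uint8_t** pp, const uint8_t* end, uint32_t* pLen)
{
    const uint8_t* p = *pp;
    if (p >= end) return false;
    uint8_t b = p[0];
    if ((b & 0x80) == 0x00) { *pLen = b; *pp = p + 1; return true; }   // 1-byte form: 0..0x7F
    if ((b & 0xC0) == 0x80) {                                          // 2-byte form: 0x80..0x3FFF
        if (end - p < 2) return false;
        *pLen = ((uint32_t)(b & 0x3F) << 8) | p[1]; *pp = p + 2; return true;
    }
    if ((b & 0xE0) == 0xC0) {                                          // 4-byte form: up to 0x1FFFFFFF
        if (end - p < 4) return false;
        *pLen = ((uint32_t)(b & 0x1F) << 24) | ((uint32_t)p[1] << 16) | ((uint32_t)p[2] << 8) | p[3];
        *pp = p + 4; return true;
    }
    return false;
}

// Every CA blob starts with the little-endian UInt16 prolog 0x0001; this is
// the same check that ValidateProlog()/GET_UNALIGNED_VAL16 perform above.
static bool SketchHasValidProlog(const uint8_t* p, const uint8_t* end)
{
    return (end - p) >= 2 && p[0] == 0x01 && p[1] == 0x00;
}
// ---------------------------------------------------------------------------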
fSuccess = CopyArrayVAL<UINT64>(*pArray, size, pBlob, endBlob); break; default: fSuccess = FALSE; } if (!fSuccess) goto badBlob; break; } default: badBlob: COMPlusThrow(kCustomAttributeFormatException); } } // get data out of the blob according to a CorElementType /*static*/ ARG_SLOT COMCustomAttribute::GetDataFromBlob(Assembly *pCtorAssembly, CorSerializationType type, TypeHandle th, BYTE **pBlob, const BYTE *endBlob, Module *pModule, BOOL *bObjectCreated) { CONTRACTL { THROWS; } CONTRACTL_END; ARG_SLOT retValue = 0; *bObjectCreated = FALSE; TypeHandle nullTH; TypeHandle typeHnd; switch ((DWORD)type) { case SERIALIZATION_TYPE_BOOLEAN: case SERIALIZATION_TYPE_I1: case SERIALIZATION_TYPE_U1: if (*pBlob + 1 <= endBlob) { retValue = (ARG_SLOT)**pBlob; *pBlob += 1; break; } goto badBlob; case SERIALIZATION_TYPE_CHAR: case SERIALIZATION_TYPE_I2: case SERIALIZATION_TYPE_U2: if (*pBlob + 2 <= endBlob) { retValue = (ARG_SLOT)GET_UNALIGNED_VAL16(*pBlob); *pBlob += 2; break; } goto badBlob; case SERIALIZATION_TYPE_I4: case SERIALIZATION_TYPE_U4: case SERIALIZATION_TYPE_R4: if (*pBlob + 4 <= endBlob) { retValue = (ARG_SLOT)GET_UNALIGNED_VAL32(*pBlob); *pBlob += 4; break; } goto badBlob; case SERIALIZATION_TYPE_I8: case SERIALIZATION_TYPE_U8: case SERIALIZATION_TYPE_R8: if (*pBlob + 8 <= endBlob) { retValue = (ARG_SLOT)GET_UNALIGNED_VAL64(*pBlob); *pBlob += 8; break; } goto badBlob; case SERIALIZATION_TYPE_STRING: stringType: { int size = GetStringSize(pBlob, endBlob); *bObjectCreated = TRUE; if (size > 0) { if (*pBlob + size < *pBlob) // integer overflow check goto badBlob; if (*pBlob + size > endBlob) goto badBlob; retValue = ObjToArgSlot(StringObject::NewString((LPCUTF8)*pBlob, size)); *pBlob += size; } else if (size == 0) retValue = ObjToArgSlot(StringObject::NewString(0)); else *bObjectCreated = FALSE; break; } // this is coming back from sig but it's not a serialization type, // essentialy the type in the blob and the type in the sig don't match case ELEMENT_TYPE_VALUETYPE: { if (!th.IsEnum()) goto badBlob; CorSerializationType enumType = (CorSerializationType)th.GetInternalCorElementType(); BOOL cannotBeObject = FALSE; retValue = GetDataFromBlob(pCtorAssembly, enumType, nullTH, pBlob, endBlob, pModule, &cannotBeObject); _ASSERTE(!cannotBeObject); break; } // this is coming back from sig but it's not a serialization type, // essentialy the type in the blob and the type in the sig don't match case ELEMENT_TYPE_CLASS: if (th.IsArray()) goto typeArray; else { MethodTable *pMT = th.AsMethodTable(); if (pMT == g_pStringClass) goto stringType; else if (pMT == g_pObjectClass) goto typeObject; else if (CoreLibBinder::IsClass(pMT, CLASS__TYPE)) goto typeType; } goto badBlob; case SERIALIZATION_TYPE_TYPE: typeType: { typeHnd = GetTypeHandleFromBlob(pCtorAssembly, SERIALIZATION_TYPE_TYPE, pBlob, endBlob, pModule); if (!typeHnd.IsNull()) retValue = ObjToArgSlot(typeHnd.GetManagedClassObject()); *bObjectCreated = TRUE; break; } // this is coming back from sig but it's not a serialization type, // essentialy the type in the blob and the type in the sig don't match case ELEMENT_TYPE_OBJECT: case SERIALIZATION_TYPE_TAGGED_OBJECT: typeObject: { // get the byte representing the real type and call GetDataFromBlob again if (*pBlob + 1 > endBlob) goto badBlob; CorSerializationType objType = (CorSerializationType)**pBlob; *pBlob += 1; switch (objType) { case SERIALIZATION_TYPE_SZARRAY: { if (*pBlob + 1 > endBlob) goto badBlob; CorSerializationType arrayType = (CorSerializationType)**pBlob; *pBlob += 1; if (arrayType 
== SERIALIZATION_TYPE_TYPE) arrayType = (CorSerializationType)ELEMENT_TYPE_CLASS; // grab the array type and make a type handle for it nullTH = GetTypeHandleFromBlob(pCtorAssembly, arrayType, pBlob, endBlob, pModule); FALLTHROUGH; } case SERIALIZATION_TYPE_TYPE: case SERIALIZATION_TYPE_STRING: // notice that the nullTH is actually not null in the array case (see case above) retValue = GetDataFromBlob(pCtorAssembly, objType, nullTH, pBlob, endBlob, pModule, bObjectCreated); _ASSERTE(*bObjectCreated || retValue == 0); break; case SERIALIZATION_TYPE_ENUM: { // // get the enum type typeHnd = GetTypeHandleFromBlob(pCtorAssembly, SERIALIZATION_TYPE_ENUM, pBlob, endBlob, pModule); _ASSERTE(typeHnd.IsTypeDesc() == false); // ok we have the class, now we go and read the data MethodTable *pMT = typeHnd.AsMethodTable(); PREFIX_ASSUME(pMT != NULL); CorSerializationType objNormType = (CorSerializationType)pMT->GetInternalCorElementType(); BOOL isObject = FALSE; retValue = GetDataFromBlob(pCtorAssembly, objNormType, nullTH, pBlob, endBlob, pModule, &isObject); _ASSERTE(!isObject); retValue= ObjToArgSlot(pMT->Box((void*)&retValue)); *bObjectCreated = TRUE; break; } default: { // the common primitive type case. We need to box the primitive typeHnd = GetTypeHandleFromBlob(pCtorAssembly, objType, pBlob, endBlob, pModule); _ASSERTE(typeHnd.IsTypeDesc() == false); retValue = GetDataFromBlob(pCtorAssembly, objType, nullTH, pBlob, endBlob, pModule, bObjectCreated); _ASSERTE(!*bObjectCreated); retValue= ObjToArgSlot(typeHnd.AsMethodTable()->Box((void*)&retValue)); *bObjectCreated = TRUE; break; } } break; } case SERIALIZATION_TYPE_SZARRAY: typeArray: { // read size BOOL isObject = FALSE; int size = (int)GetDataFromBlob(pCtorAssembly, SERIALIZATION_TYPE_I4, nullTH, pBlob, endBlob, pModule, &isObject); _ASSERTE(!isObject); if (size != -1) { CorSerializationType arrayType; if (th.IsEnum()) arrayType = SERIALIZATION_TYPE_ENUM; else arrayType = (CorSerializationType)th.GetInternalCorElementType(); BASEARRAYREF array = NULL; GCPROTECT_BEGIN(array); ReadArray(pCtorAssembly, arrayType, size, th, pBlob, endBlob, pModule, &array); retValue = ObjToArgSlot(array); GCPROTECT_END(); } *bObjectCreated = TRUE; break; } default: badBlob: //<TODO> generate a reasonable text string ("invalid blob or constructor")</TODO> COMPlusThrow(kCustomAttributeFormatException); } return retValue; }
-1
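The blob-parsing code in the record above follows one recurring pattern: before every primitive read the remaining length of the custom-attribute blob is checked against `endBlob`, the value is loaded with an unaligned little-endian read (`GET_UNALIGNED_VAL16/32/64`), and the cursor is advanced by the size just consumed. The sketch below restates that pattern as a small standalone C++ helper; the names (`Cursor`, `TryReadU32`) are illustrative only and are not CLR APIs.

```cpp
#include <cstdint>
#include <cstring>
#include <optional>

// Illustrative stand-in for the (pBlob, endBlob) pair threaded through the CLR code above.
struct Cursor {
    const uint8_t* p;
    const uint8_t* end;
};

// Bounds-checked, unaligned, little-endian 32-bit read; advances the cursor on success.
// Mirrors the "if (*pBlob + 4 <= endBlob) { GET_UNALIGNED_VAL32(*pBlob); *pBlob += 4; }" shape above.
std::optional<uint32_t> TryReadU32(Cursor& c) {
    if (c.end - c.p < 4)              // overflow-safe form of the bounds check
        return std::nullopt;          // caller decides how to report a malformed blob
    uint32_t v = 0;
    std::memcpy(&v, c.p, 4);          // unaligned-safe load
#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    v = __builtin_bswap32(v);         // the blob is always little-endian, as CopyArrayVAL handles above
#endif
    c.p += 4;
    return v;
}
```

Writing the guard as `end - p < 4` sidesteps the pointer-overflow hazard that the original code checks for separately with patterns like `*pBlob + size < *pBlob`.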
dotnet/runtime
66,474
Update repo CMake configuration to target /W4 by default
Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
AaronRobinsonMSFT
2022-03-10T20:20:31Z
2022-03-11T04:59:21Z
ba5207b0f4054fe8963fc3e87c2bd32907516961
5ce2b9f860f7a84c3059650bb67817d59d8f4953
Update repo CMake configuration to target /W4 by default. Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
./src/coreclr/vm/vars.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // vars.cpp - Global Var definitions // #include "common.h" #include "vars.hpp" #include "cordbpriv.h" #include "eeprofinterfaces.h" #include "bbsweep.h" #ifndef DACCESS_COMPILE // // Allow use of native images? // bool g_fAllowNativeImages = true; // // Default install library // const WCHAR g_pwBaseLibrary[] = CoreLibName_IL_W; const WCHAR g_pwBaseLibraryName[] = CoreLibName_W; const char g_psBaseLibrary[] = CoreLibName_IL_A; const char g_psBaseLibraryName[] = CoreLibName_A; const char g_psBaseLibrarySatelliteAssemblyName[] = CoreLibSatelliteName_A; Volatile<int32_t> g_TrapReturningThreads; BBSweep g_BBSweep; #ifdef _DEBUG // next two variables are used to enforce an ASSERT in Thread::DbgFindThread // that does not allow g_TrapReturningThreads to creep up unchecked. Volatile<LONG> g_trtChgStamp = 0; Volatile<LONG> g_trtChgInFlight = 0; const char * g_ExceptionFile; // Source of the last thrown exception (COMPLUSThrow()) DWORD g_ExceptionLine; // ... ditto ... void * g_ExceptionEIP; // Managed EIP of the last JITThrow caller. #endif // _DEBUG void * g_LastAccessViolationEIP; // The EIP of the place we last threw an AV. Used to diagnose stress issues. #endif // #ifndef DACCESS_COMPILE GPTR_IMPL(IdDispenser, g_pThinLockThreadIdDispenser); GPTR_IMPL(IdDispenser, g_pModuleIndexDispenser); IBCLogger g_IBCLogger; // For [<I1, etc. up to and including [Object GARY_IMPL(TypeHandle, g_pPredefinedArrayTypes, ELEMENT_TYPE_MAX); GPTR_IMPL(EEConfig, g_pConfig); // configuration data (from the registry) GPTR_IMPL(MethodTable, g_pObjectClass); GPTR_IMPL(MethodTable, g_pRuntimeTypeClass); GPTR_IMPL(MethodTable, g_pCanonMethodTableClass); // System.__Canon GPTR_IMPL(MethodTable, g_pStringClass); GPTR_IMPL(MethodTable, g_pArrayClass); GPTR_IMPL(MethodTable, g_pSZArrayHelperClass); GPTR_IMPL(MethodTable, g_pNullableClass); GPTR_IMPL(MethodTable, g_pByReferenceClass); GPTR_IMPL(MethodTable, g_pExceptionClass); GPTR_IMPL(MethodTable, g_pThreadAbortExceptionClass); GPTR_IMPL(MethodTable, g_pOutOfMemoryExceptionClass); GPTR_IMPL(MethodTable, g_pStackOverflowExceptionClass); GPTR_IMPL(MethodTable, g_pExecutionEngineExceptionClass); GPTR_IMPL(MethodTable, g_pDelegateClass); GPTR_IMPL(MethodTable, g_pMulticastDelegateClass); GPTR_IMPL(MethodTable, g_pValueTypeClass); GPTR_IMPL(MethodTable, g_pEnumClass); GPTR_IMPL(MethodTable, g_pThreadClass); GPTR_IMPL(MethodTable, g_pFreeObjectMethodTable); GPTR_IMPL(MethodTable, g_pOverlappedDataClass); GPTR_IMPL(MethodTable, g_TypedReferenceMT); #ifdef FEATURE_COMINTEROP GPTR_IMPL(MethodTable, g_pBaseCOMObject); #endif GPTR_IMPL(MethodTable, g_pIDynamicInterfaceCastableInterface); #ifdef FEATURE_ICASTABLE GPTR_IMPL(MethodTable, g_pICastableInterface); #endif // FEATURE_ICASTABLE GPTR_IMPL(MethodDesc, g_pObjectFinalizerMD); GPTR_IMPL(Thread,g_pFinalizerThread); GPTR_IMPL(Thread,g_pSuspensionThread); // Global SyncBlock cache GPTR_IMPL(SyncTableEntry,g_pSyncTable); #ifdef STRESS_LOG GPTR_IMPL_INIT(StressLog, g_pStressLog, &StressLog::theLog); #endif #ifdef FEATURE_COMINTEROP // Global RCW cleanup list GPTR_IMPL(RCWCleanupList,g_pRCWCleanupList); #endif // FEATURE_COMINTEROP #ifdef FEATURE_INTEROP_DEBUGGING GVAL_IMPL_INIT(DWORD, g_debuggerWordTLSIndex, TLS_OUT_OF_INDEXES); #endif GVAL_IMPL_INIT(DWORD, g_TlsIndex, TLS_OUT_OF_INDEXES); #ifndef DACCESS_COMPILE // <TODO> @TODO - PROMOTE. 
</TODO> OBJECTHANDLE g_pPreallocatedOutOfMemoryException; OBJECTHANDLE g_pPreallocatedStackOverflowException; OBJECTHANDLE g_pPreallocatedExecutionEngineException; OBJECTHANDLE g_pPreallocatedSentinelObject; // // // Global System Info // SYSTEM_INFO g_SystemInfo; // Configurable constants used across our spin locks // Initialization here is necessary so that we have meaningful values before the runtime is started // These initial values were selected to match the defaults, but anything reasonable is close enough SpinConstants g_SpinConstants = { 50, // dwInitialDuration 40000, // dwMaximumDuration - ideally (20000 * max(2, numProc)) 3, // dwBackoffFactor 10, // dwRepetitions 0 // dwMonitorSpinCount }; // support for Event Tracing for Windows (ETW) ETW::CEtwTracer * g_pEtwTracer = NULL; #endif // #ifndef DACCESS_COMPILE // // Support for the COM+ Debugger. // GPTR_IMPL(DebugInterface, g_pDebugInterface); // A managed debugger may set this flag to high from out of process. GVAL_IMPL_INIT(DWORD, g_CORDebuggerControlFlags, DBCF_NORMAL_OPERATION); #ifdef DEBUGGING_SUPPORTED GPTR_IMPL(EEDbgInterfaceImpl, g_pEEDbgInterfaceImpl); #endif // DEBUGGING_SUPPORTED #if defined(PROFILING_SUPPORTED_DATA) || defined(PROFILING_SUPPPORTED) // Profiling support HINSTANCE g_pDebuggerDll = NULL; GVAL_IMPL(ProfControlBlock, g_profControlBlock); #endif // defined(PROFILING_SUPPORTED_DATA) || defined(PROFILING_SUPPPORTED) #ifndef DACCESS_COMPILE // Global default for Concurrent GC. The default is value is 1 int g_IGCconcurrent = 1; int g_IGCHoardVM = 0; // // Global state variable indicating if the EE is in its init phase. // bool g_fEEInit = false; // // Global state variables indicating which stage of shutdown we are in // #endif // #ifndef DACCESS_COMPILE // See comments at code:EEShutDown for details on how and why this gets set. Use // code:IsAtProcessExit to read this. GVAL_IMPL(bool, g_fProcessDetach); #ifdef EnC_SUPPORTED GVAL_IMPL_INIT(bool, g_metadataUpdatesApplied, false); #endif GVAL_IMPL_INIT(DWORD, g_fEEShutDown, 0); #ifndef TARGET_UNIX GVAL_IMPL(SIZE_T, g_runtimeLoadedBaseAddress); GVAL_IMPL(SIZE_T, g_runtimeVirtualSize); #endif // !TARGET_UNIX #ifndef DACCESS_COMPILE Volatile<LONG> g_fForbidEnterEE = false; bool g_fManagedAttach = false; bool g_fNoExceptions = false; DWORD g_FinalizerWaiterStatus = 0; // // Do we own the lifetime of the process, ie. is it an EXE? // bool g_fWeControlLifetime = false; #ifdef _DEBUG // The following should only be used for assertions. (Famous last words). bool dbg_fDrasticShutdown = false; #endif bool g_fInControlC = false; #endif // #ifndef DACCESS_COMPILE #ifdef DACCESS_COMPILE void OBJECTHANDLE_EnumMemoryRegions(OBJECTHANDLE handle) { SUPPORTS_DAC; PTR_TADDR ref = PTR_TADDR(handle); if (ref.IsValid()) { ref.EnumMem(); PTR_Object obj = PTR_Object(*ref); if (obj.IsValid()) { obj->EnumMemoryRegions(); } } } void OBJECTREF_EnumMemoryRegions(OBJECTREF ref) { if (ref.IsValid()) { ref->EnumMemoryRegions(); } } #endif // #ifdef DACCESS_COMPILE #ifndef DACCESS_COMPILE // // We need the following to be the compiler's notion of volatile. // extern "C" RAW_KEYWORD(volatile) const GSCookie s_gsCookie = 0; #else __GlobalVal< GSCookie > s_gsCookie(&g_dacGlobals.dac__s_gsCookie); #endif //!DACCESS_COMPILE //==============================================================================
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // vars.cpp - Global Var definitions // #include "common.h" #include "vars.hpp" #include "cordbpriv.h" #include "eeprofinterfaces.h" #include "bbsweep.h" #ifndef DACCESS_COMPILE // // Allow use of native images? // bool g_fAllowNativeImages = true; // // Default install library // const WCHAR g_pwBaseLibrary[] = CoreLibName_IL_W; const WCHAR g_pwBaseLibraryName[] = CoreLibName_W; const char g_psBaseLibrary[] = CoreLibName_IL_A; const char g_psBaseLibraryName[] = CoreLibName_A; const char g_psBaseLibrarySatelliteAssemblyName[] = CoreLibSatelliteName_A; Volatile<int32_t> g_TrapReturningThreads; BBSweep g_BBSweep; #ifdef _DEBUG // next two variables are used to enforce an ASSERT in Thread::DbgFindThread // that does not allow g_TrapReturningThreads to creep up unchecked. Volatile<LONG> g_trtChgStamp = 0; Volatile<LONG> g_trtChgInFlight = 0; const char * g_ExceptionFile; // Source of the last thrown exception (COMPLUSThrow()) DWORD g_ExceptionLine; // ... ditto ... void * g_ExceptionEIP; // Managed EIP of the last JITThrow caller. #endif // _DEBUG void * g_LastAccessViolationEIP; // The EIP of the place we last threw an AV. Used to diagnose stress issues. #endif // #ifndef DACCESS_COMPILE GPTR_IMPL(IdDispenser, g_pThinLockThreadIdDispenser); GPTR_IMPL(IdDispenser, g_pModuleIndexDispenser); IBCLogger g_IBCLogger; // For [<I1, etc. up to and including [Object GARY_IMPL(TypeHandle, g_pPredefinedArrayTypes, ELEMENT_TYPE_MAX); GPTR_IMPL(EEConfig, g_pConfig); // configuration data (from the registry) GPTR_IMPL(MethodTable, g_pObjectClass); GPTR_IMPL(MethodTable, g_pRuntimeTypeClass); GPTR_IMPL(MethodTable, g_pCanonMethodTableClass); // System.__Canon GPTR_IMPL(MethodTable, g_pStringClass); GPTR_IMPL(MethodTable, g_pArrayClass); GPTR_IMPL(MethodTable, g_pSZArrayHelperClass); GPTR_IMPL(MethodTable, g_pNullableClass); GPTR_IMPL(MethodTable, g_pByReferenceClass); GPTR_IMPL(MethodTable, g_pExceptionClass); GPTR_IMPL(MethodTable, g_pThreadAbortExceptionClass); GPTR_IMPL(MethodTable, g_pOutOfMemoryExceptionClass); GPTR_IMPL(MethodTable, g_pStackOverflowExceptionClass); GPTR_IMPL(MethodTable, g_pExecutionEngineExceptionClass); GPTR_IMPL(MethodTable, g_pDelegateClass); GPTR_IMPL(MethodTable, g_pMulticastDelegateClass); GPTR_IMPL(MethodTable, g_pValueTypeClass); GPTR_IMPL(MethodTable, g_pEnumClass); GPTR_IMPL(MethodTable, g_pThreadClass); GPTR_IMPL(MethodTable, g_pFreeObjectMethodTable); GPTR_IMPL(MethodTable, g_pOverlappedDataClass); GPTR_IMPL(MethodTable, g_TypedReferenceMT); #ifdef FEATURE_COMINTEROP GPTR_IMPL(MethodTable, g_pBaseCOMObject); #endif GPTR_IMPL(MethodTable, g_pIDynamicInterfaceCastableInterface); #ifdef FEATURE_ICASTABLE GPTR_IMPL(MethodTable, g_pICastableInterface); #endif // FEATURE_ICASTABLE GPTR_IMPL(MethodDesc, g_pObjectFinalizerMD); GPTR_IMPL(Thread,g_pFinalizerThread); GPTR_IMPL(Thread,g_pSuspensionThread); // Global SyncBlock cache GPTR_IMPL(SyncTableEntry,g_pSyncTable); #ifdef STRESS_LOG GPTR_IMPL_INIT(StressLog, g_pStressLog, &StressLog::theLog); #endif #ifdef FEATURE_COMINTEROP // Global RCW cleanup list GPTR_IMPL(RCWCleanupList,g_pRCWCleanupList); #endif // FEATURE_COMINTEROP #ifdef FEATURE_INTEROP_DEBUGGING GVAL_IMPL_INIT(DWORD, g_debuggerWordTLSIndex, TLS_OUT_OF_INDEXES); #endif GVAL_IMPL_INIT(DWORD, g_TlsIndex, TLS_OUT_OF_INDEXES); #ifndef DACCESS_COMPILE // <TODO> @TODO - PROMOTE. 
</TODO> OBJECTHANDLE g_pPreallocatedOutOfMemoryException; OBJECTHANDLE g_pPreallocatedStackOverflowException; OBJECTHANDLE g_pPreallocatedExecutionEngineException; OBJECTHANDLE g_pPreallocatedSentinelObject; // // // Global System Info // SYSTEM_INFO g_SystemInfo; // Configurable constants used across our spin locks // Initialization here is necessary so that we have meaningful values before the runtime is started // These initial values were selected to match the defaults, but anything reasonable is close enough SpinConstants g_SpinConstants = { 50, // dwInitialDuration 40000, // dwMaximumDuration - ideally (20000 * max(2, numProc)) 3, // dwBackoffFactor 10, // dwRepetitions 0 // dwMonitorSpinCount }; // support for Event Tracing for Windows (ETW) ETW::CEtwTracer * g_pEtwTracer = NULL; #endif // #ifndef DACCESS_COMPILE // // Support for the COM+ Debugger. // GPTR_IMPL(DebugInterface, g_pDebugInterface); // A managed debugger may set this flag to high from out of process. GVAL_IMPL_INIT(DWORD, g_CORDebuggerControlFlags, DBCF_NORMAL_OPERATION); #ifdef DEBUGGING_SUPPORTED GPTR_IMPL(EEDbgInterfaceImpl, g_pEEDbgInterfaceImpl); #endif // DEBUGGING_SUPPORTED #if defined(PROFILING_SUPPORTED_DATA) || defined(PROFILING_SUPPPORTED) // Profiling support HINSTANCE g_pDebuggerDll = NULL; GVAL_IMPL(ProfControlBlock, g_profControlBlock); #endif // defined(PROFILING_SUPPORTED_DATA) || defined(PROFILING_SUPPPORTED) #ifndef DACCESS_COMPILE // Global default for Concurrent GC. The default is value is 1 int g_IGCconcurrent = 1; int g_IGCHoardVM = 0; // // Global state variable indicating if the EE is in its init phase. // bool g_fEEInit = false; // // Global state variables indicating which stage of shutdown we are in // #endif // #ifndef DACCESS_COMPILE // See comments at code:EEShutDown for details on how and why this gets set. Use // code:IsAtProcessExit to read this. GVAL_IMPL(bool, g_fProcessDetach); #ifdef EnC_SUPPORTED GVAL_IMPL_INIT(bool, g_metadataUpdatesApplied, false); #endif GVAL_IMPL_INIT(DWORD, g_fEEShutDown, 0); #ifndef TARGET_UNIX GVAL_IMPL(SIZE_T, g_runtimeLoadedBaseAddress); GVAL_IMPL(SIZE_T, g_runtimeVirtualSize); #endif // !TARGET_UNIX #ifndef DACCESS_COMPILE Volatile<LONG> g_fForbidEnterEE = false; bool g_fManagedAttach = false; bool g_fNoExceptions = false; DWORD g_FinalizerWaiterStatus = 0; // // Do we own the lifetime of the process, ie. is it an EXE? // bool g_fWeControlLifetime = false; #ifdef _DEBUG // The following should only be used for assertions. (Famous last words). bool dbg_fDrasticShutdown = false; #endif bool g_fInControlC = false; #endif // #ifndef DACCESS_COMPILE #ifdef DACCESS_COMPILE void OBJECTHANDLE_EnumMemoryRegions(OBJECTHANDLE handle) { SUPPORTS_DAC; PTR_TADDR ref = PTR_TADDR(handle); if (ref.IsValid()) { ref.EnumMem(); PTR_Object obj = PTR_Object(*ref); if (obj.IsValid()) { obj->EnumMemoryRegions(); } } } void OBJECTREF_EnumMemoryRegions(OBJECTREF ref) { if (ref.IsValid()) { ref->EnumMemoryRegions(); } } #endif // #ifdef DACCESS_COMPILE #ifndef DACCESS_COMPILE // // We need the following to be the compiler's notion of volatile. // extern "C" RAW_KEYWORD(volatile) const GSCookie s_gsCookie = 0; #else __GlobalVal< GSCookie > s_gsCookie(&g_dacGlobals.dac__s_gsCookie); #endif //!DACCESS_COMPILE //==============================================================================
-1
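The PR in the record above raises the MSVC warning level to /W4 and temporarily disables the warnings that currently fire, with re-enabling tracked in the linked SDL issue; vars.cpp itself is unchanged (label -1). As a rough illustration only (not code from the PR), the snippet below shows two warnings that appear only at /W4 — C4100 (unreferenced formal parameter) and C4189 (local variable initialized but not referenced) — and the push/disable/pop pattern for silencing one locally. The PR suppresses warnings at the CMake level rather than with pragmas, and the file name in the comment is hypothetical.

```cpp
// Compile with: cl /W4 /c example.cpp   (illustrative invocation)

// C4100: 'unused' is never referenced; reported only at /W4.
int Sum(int a, int b, int unused)
{
    int scratch = a + b;   // would trigger C4189 if 'scratch' were never read afterwards
    return scratch;
}

// Local, targeted suppression -- an alternative to build-wide CMake flags.
#pragma warning(push)
#pragma warning(disable : 4100) // unreferenced formal parameter
int Callback(void* context)
{
    return 0;               // 'context' intentionally unused here
}
#pragma warning(pop)
```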
dotnet/runtime
66,474
Update repo CMake configuration to target /W4 by default
Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
AaronRobinsonMSFT
2022-03-10T20:20:31Z
2022-03-11T04:59:21Z
ba5207b0f4054fe8963fc3e87c2bd32907516961
5ce2b9f860f7a84c3059650bb67817d59d8f4953
Update repo CMake configuration to target /W4 by default. Contributes to https://github.com/dotnet/runtime/issues/66154 Disable all failing warnings. The ones listed in the SDL issue above will gradually be re-enabled. /cc @elinor-fung @am11 @jkotas
./src/coreclr/pal/tests/palsuite/c_runtime/islower/test1/test1.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: test1.c ** ** Purpose: Tests the PAL implementation of the islower function ** Check that a number of characters return the correct ** values for whether they are lower case or not. ** ** **===================================================================*/ #include <palsuite.h> struct testCase { int CorrectResult; int character; }; PALTEST(c_runtime_islower_test1_paltest_islower_test1, "c_runtime/islower/test1/paltest_islower_test1") { int result; int i; struct testCase testCases[] = { {1, 'a'}, /* Basic cases */ {1, 'z'}, {0, 'B'}, /* Lower case */ {0, '?'}, /* Characters without case */ {0, 230}, {0, '5'} }; if (PAL_Initialize(argc, argv)) { return FAIL; } /* Loop through each case. Check to see if each is lower case or not. */ for(i = 0; i < sizeof(testCases) / sizeof(struct testCase); i++) { result = islower(testCases[i].character); /* The return value is 'non-zero' for success. This if condition * will still work if that non-zero isn't just 1 */ if ( ((testCases[i].CorrectResult == 1) && (result == 0)) || ( (testCases[i].CorrectResult == 0) && (result != 0) )) { Fail("ERROR: islower returned %i instead of %i for " "character %c.\n", result, testCases[i].CorrectResult, testCases[i].character); } } PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: test1.c ** ** Purpose: Tests the PAL implementation of the islower function ** Check that a number of characters return the correct ** values for whether they are lower case or not. ** ** **===================================================================*/ #include <palsuite.h> struct testCase { int CorrectResult; int character; }; PALTEST(c_runtime_islower_test1_paltest_islower_test1, "c_runtime/islower/test1/paltest_islower_test1") { int result; int i; struct testCase testCases[] = { {1, 'a'}, /* Basic cases */ {1, 'z'}, {0, 'B'}, /* Lower case */ {0, '?'}, /* Characters without case */ {0, 230}, {0, '5'} }; if (PAL_Initialize(argc, argv)) { return FAIL; } /* Loop through each case. Check to see if each is lower case or not. */ for(i = 0; i < sizeof(testCases) / sizeof(struct testCase); i++) { result = islower(testCases[i].character); /* The return value is 'non-zero' for success. This if condition * will still work if that non-zero isn't just 1 */ if ( ((testCases[i].CorrectResult == 1) && (result == 0)) || ( (testCases[i].CorrectResult == 0) && (result != 0) )) { Fail("ERROR: islower returned %i instead of %i for " "character %c.\n", result, testCases[i].CorrectResult, testCases[i].character); } } PAL_Terminate(); return PASS; }
-1
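The PAL test in the record above hinges on the point its comment calls out: `islower` only guarantees a non-zero result for lower-case input, not the value 1, so the check must normalize the return value before comparing it with the expected 0/1. A minimal standalone version of that check (outside the palsuite harness, so `PAL_Initialize`/`Fail` are not used and the test-case values are simply reused from above) could look like this:

```cpp
#include <cctype>
#include <cstdio>

struct TestCase { int expected; int character; };

int main()
{
    // Same cases as the PAL test; 230 exercises a byte outside the ASCII range.
    const TestCase cases[] = { {1, 'a'}, {1, 'z'}, {0, 'B'}, {0, '?'}, {0, 230}, {0, '5'} };

    for (const TestCase& tc : cases)
    {
        // Normalize: islower returns *some* non-zero value for lower case, not necessarily 1.
        int got = (std::islower(tc.character) != 0) ? 1 : 0;
        if (got != tc.expected)
        {
            std::printf("islower(%d) gave %d, expected %d\n", tc.character, got, tc.expected);
            return 1;
        }
    }
    return 0;
}
```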